diff --git a/llvm/lib/CodeGen/GlobalISel/CallLowering.cpp b/llvm/lib/CodeGen/GlobalISel/CallLowering.cpp
index 975787a8f5e734..2527b143128967 100644
--- a/llvm/lib/CodeGen/GlobalISel/CallLowering.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/CallLowering.cpp
@@ -358,7 +358,7 @@ static void buildCopyFromRegs(MachineIRBuilder &B, ArrayRef<Register> OrigRegs,
   if (PartLLT.isVector() == LLTy.isVector() &&
       PartLLT.getScalarSizeInBits() > LLTy.getScalarSizeInBits() &&
       (!PartLLT.isVector() ||
-       PartLLT.getNumElements() == LLTy.getNumElements()) &&
+       PartLLT.getElementCount() == LLTy.getElementCount()) &&
       OrigRegs.size() == 1 && Regs.size() == 1) {
     Register SrcReg = Regs[0];
 
@@ -406,6 +406,7 @@ static void buildCopyFromRegs(MachineIRBuilder &B, ArrayRef<Register> OrigRegs,
   // If PartLLT is a mismatched vector in both number of elements and element
   // size, e.g. PartLLT == v2s64 and LLTy is v3s32, then first coerce it to
   // have the same elt type, i.e. v4s32.
+  // TODO: Extend this coercion to element multiples other than just 2.
   if (PartLLT.getSizeInBits() > LLTy.getSizeInBits() &&
       PartLLT.getScalarSizeInBits() == LLTy.getScalarSizeInBits() * 2 &&
       Regs.size() == 1) {
diff --git a/llvm/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp b/llvm/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp
index 5b4e2b725e1dd7..80e9c08e850b68 100644
--- a/llvm/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp
@@ -1065,16 +1065,16 @@ void MachineIRBuilder::validateTruncExt(const LLT DstTy, const LLT SrcTy,
 #ifndef NDEBUG
   if (DstTy.isVector()) {
     assert(SrcTy.isVector() && "mismatched cast between vector and non-vector");
-    assert(SrcTy.getNumElements() == DstTy.getNumElements() &&
+    assert(SrcTy.getElementCount() == DstTy.getElementCount() &&
            "different number of elements in a trunc/ext");
   } else
     assert(DstTy.isScalar() && SrcTy.isScalar() && "invalid extend/trunc");
 
   if (IsExtend)
-    assert(DstTy.getSizeInBits() > SrcTy.getSizeInBits() &&
+    assert(TypeSize::isKnownGT(DstTy.getSizeInBits(), SrcTy.getSizeInBits()) &&
            "invalid narrowing extend");
   else
-    assert(DstTy.getSizeInBits() < SrcTy.getSizeInBits() &&
+    assert(TypeSize::isKnownLT(DstTy.getSizeInBits(), SrcTy.getSizeInBits()) &&
            "invalid widening trunc");
 #endif
 }
diff --git a/llvm/lib/CodeGen/LowLevelType.cpp b/llvm/lib/CodeGen/LowLevelType.cpp
index 24c30b756737b2..cd85bf606989f9 100644
--- a/llvm/lib/CodeGen/LowLevelType.cpp
+++ b/llvm/lib/CodeGen/LowLevelType.cpp
@@ -17,7 +17,7 @@ using namespace llvm;
 
 LLT::LLT(MVT VT) {
   if (VT.isVector()) {
-    bool asVector = VT.getVectorMinNumElements() > 1;
+    bool asVector = VT.getVectorMinNumElements() > 1 || VT.isScalableVector();
     init(/*IsPointer=*/false, asVector, /*IsScalar=*/!asVector,
          VT.getVectorElementCount(), VT.getVectorElementType().getSizeInBits(),
          /*AddressSpace=*/0);
diff --git a/llvm/lib/CodeGen/MachineVerifier.cpp b/llvm/lib/CodeGen/MachineVerifier.cpp
index da1d9c6f0679c7..6107fa5c43c57f 100644
--- a/llvm/lib/CodeGen/MachineVerifier.cpp
+++ b/llvm/lib/CodeGen/MachineVerifier.cpp
@@ -965,7 +965,7 @@ bool MachineVerifier::verifyVectorElementMatch(LLT Ty0, LLT Ty1,
     return false;
   }
 
-  if (Ty0.isVector() && Ty0.getNumElements() != Ty1.getNumElements()) {
+  if (Ty0.isVector() && Ty0.getElementCount() != Ty1.getElementCount()) {
     report("operand types must preserve number of vector elements", MI);
     return false;
   }
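
For context, a minimal sketch of the scalable-size semantics the asserts above rely on (illustrative only, not part of the patch; include paths vary across LLVM versions): ordering comparisons between fixed and scalable sizes are not total, which is why the code switches to ElementCount equality and the TypeSize::isKnown* predicates.

#include "llvm/CodeGen/LowLevelType.h"
#include "llvm/Support/TypeSize.h"
#include <cassert>
using namespace llvm;

void scalableSizeSketch() {
  // With the LLT(MVT) change above, a one-element scalable MVT such as
  // nxv1i8 now maps to a scalable vector LLT rather than to a scalar s8.
  LLT NxV1S8 = LLT::scalable_vector(1, /*ScalarSizeInBits=*/8);
  assert(NxV1S8.isVector() && NxV1S8.isScalable());

  TypeSize Fixed = TypeSize::getFixed(128);       // always 128 bits
  TypeSize Scalable = TypeSize::getScalable(128); // vscale x 128 bits
  // Not *known* greater: vscale == 1 would make both exactly 128 bits.
  assert(!TypeSize::isKnownGT(Scalable, Fixed));
  // But known greater-or-equal, because vscale >= 1 by definition.
  assert(TypeSize::isKnownGE(Scalable, Fixed));
}
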
diff --git a/llvm/lib/Target/RISCV/GISel/RISCVCallLowering.cpp b/llvm/lib/Target/RISCV/GISel/RISCVCallLowering.cpp
index a1dbc21ca36466..e73d8863963d0b 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVCallLowering.cpp
+++ b/llvm/lib/Target/RISCV/GISel/RISCVCallLowering.cpp
@@ -14,6 +14,7 @@
 
 #include "RISCVCallLowering.h"
 #include "RISCVISelLowering.h"
+#include "RISCVMachineFunctionInfo.h"
 #include "RISCVSubtarget.h"
 #include "llvm/CodeGen/Analysis.h"
 #include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
@@ -185,6 +186,9 @@ struct RISCVIncomingValueAssigner : public CallLowering::IncomingValueAssigner {
     const DataLayout &DL = MF.getDataLayout();
     const RISCVSubtarget &Subtarget = MF.getSubtarget<RISCVSubtarget>();
 
+    if (LocVT.isScalableVector())
+      MF.getInfo<RISCVMachineFunctionInfo>()->setIsVectorCall();
+
     if (RISCVAssignFn(DL, Subtarget.getTargetABI(), ValNo, ValVT, LocVT,
                       LocInfo, Flags, State, /*IsFixed=*/true, IsRet, Info.Ty,
                       *Subtarget.getTargetLowering(),
@@ -301,8 +305,31 @@ struct RISCVCallReturnHandler : public RISCVIncomingValueHandler {
 RISCVCallLowering::RISCVCallLowering(const RISCVTargetLowering &TLI)
     : CallLowering(&TLI) {}
 
+/// Return true if a scalable vector with element type EltTy is legal for
+/// lowering.
+static bool isLegalElementTypeForRVV(Type *EltTy,
+                                     const RISCVSubtarget &Subtarget) {
+  if (EltTy->isPointerTy())
+    return Subtarget.is64Bit() ? Subtarget.hasVInstructionsI64() : true;
+  if (EltTy->isIntegerTy(1) || EltTy->isIntegerTy(8) ||
+      EltTy->isIntegerTy(16) || EltTy->isIntegerTy(32))
+    return true;
+  if (EltTy->isIntegerTy(64))
+    return Subtarget.hasVInstructionsI64();
+  if (EltTy->isHalfTy())
+    return Subtarget.hasVInstructionsF16();
+  if (EltTy->isBFloatTy())
+    return Subtarget.hasVInstructionsBF16();
+  if (EltTy->isFloatTy())
+    return Subtarget.hasVInstructionsF32();
+  if (EltTy->isDoubleTy())
+    return Subtarget.hasVInstructionsF64();
+  return false;
+}
+
 // TODO: Support all argument types.
-static bool isSupportedArgumentType(Type *T, const RISCVSubtarget &Subtarget) {
+// TODO: Remove IsLowerArgs argument by adding support for vectors in lowerCall.
+static bool isSupportedArgumentType(Type *T, const RISCVSubtarget &Subtarget,
+                                    bool IsLowerArgs = false) {
   // TODO: Integers larger than 2*XLen are passed indirectly which is not
   // supported yet.
   if (T->isIntegerTy())
@@ -311,6 +338,11 @@ static bool isSupportedArgumentType(Type *T, const RISCVSubtarget &Subtarget) {
     return true;
   if (T->isPointerTy())
     return true;
+  // TODO: Support fixed vector types.
+  if (IsLowerArgs && T->isVectorTy() && Subtarget.hasVInstructions() &&
+      T->isScalableTy() &&
+      isLegalElementTypeForRVV(T->getScalarType(), Subtarget))
+    return true;
   return false;
 }
 
@@ -398,7 +430,8 @@ bool RISCVCallLowering::lowerFormalArguments(MachineIRBuilder &MIRBuilder,
   const RISCVSubtarget &Subtarget =
       MIRBuilder.getMF().getSubtarget<RISCVSubtarget>();
   for (auto &Arg : F.args()) {
-    if (!isSupportedArgumentType(Arg.getType(), Subtarget))
+    if (!isSupportedArgumentType(Arg.getType(), Subtarget,
+                                 /*IsLowerArgs=*/true))
       return false;
   }
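
The register expectations in the tests added below follow from the RVV calling convention: one vector argument register holds vscale x 64 bits (RISCV::RVVBitsPerBlock), so a scalable argument occupies ceil(known-min-bits / 64) consecutive registers starting at $v8. A tiny sketch of that mapping (lmulForMinBits is a hypothetical helper for illustration, not code from the patch):

#include <cassert>

// Hypothetical helper: the number of RVV registers (the LMUL grouping) an
// argument occupies, given its known-minimum size in bits and
// RISCV::RVVBitsPerBlock == 64. Types at or below one block still take a
// whole register.
static unsigned lmulForMinBits(unsigned KnownMinBits) {
  return KnownMinBits <= 64 ? 1 : KnownMinBits / 64;
}

void lmulSketch() {
  assert(lmulForMinBits(8 * 8) == 1);  // <vscale x 8 x i8>  -> $v8
  assert(lmulForMinBits(16 * 8) == 2); // <vscale x 16 x i8> -> $v8m2
  assert(lmulForMinBits(32 * 8) == 4); // <vscale x 32 x i8> -> $v8m4
  assert(lmulForMinBits(64 * 8) == 8); // <vscale x 64 x i8> -> $v8m8
}
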
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/fallback.ll b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/fallback.ll
index a3a913d8ce02d8..2ad068eb7dc3d5 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/fallback.ll
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/fallback.ll
@@ -9,7 +9,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   i64)
 
-; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to lower arguments{{.*}}scalable_arg
+; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to translate instruction: call:
 ; FALLBACK-WITH-REPORT-OUT-LABEL: scalable_arg
 define <vscale x 1 x i8> @scalable_arg(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i64 %2) nounwind {
 entry:
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/vec-args-bf16-err.ll b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/vec-args-bf16-err.ll
new file mode 100644
index 00000000000000..f39e7793e5d4f3
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/vec-args-bf16-err.ll
@@ -0,0 +1,16 @@
+; RUN: not --crash llc -mtriple=riscv32 -mattr=+v -global-isel -stop-after=irtranslator \
+; RUN:   -verify-machineinstrs < %s 2>&1 | FileCheck %s
+; RUN: not --crash llc -mtriple=riscv64 -mattr=+v -global-isel -stop-after=irtranslator \
+; RUN:   -verify-machineinstrs < %s 2>&1 | FileCheck %s
+
+; The purpose of this test is to show that the compiler throws an error when
+; there is no support for bf16 vectors. If the compiler did not throw an error,
+; then it would try to scalarize the argument to an s32, which may drop elements.
+define void @test_args_nxv1bf16(<vscale x 1 x bfloat> %a) {
+entry:
+  ret void
+}
+
+; CHECK: LLVM ERROR: unable to lower arguments: ptr (in function: test_args_nxv1bf16)
+
+
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/vec-args-f16-err.ll b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/vec-args-f16-err.ll
new file mode 100644
index 00000000000000..042b455bfb5475
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/vec-args-f16-err.ll
@@ -0,0 +1,16 @@
+; RUN: not --crash llc -mtriple=riscv32 -mattr=+v -global-isel -stop-after=irtranslator \
+; RUN:   -verify-machineinstrs < %s 2>&1 | FileCheck %s
+; RUN: not --crash llc -mtriple=riscv64 -mattr=+v -global-isel -stop-after=irtranslator \
+; RUN:   -verify-machineinstrs < %s 2>&1 | FileCheck %s
+
+; The purpose of this test is to show that the compiler throws an error when
+; there is no support for f16 vectors. If the compiler did not throw an error,
+; then it would try to scalarize the argument to an s32, which may drop elements.
+define void @test_args_nxv1f16(<vscale x 1 x half> %a) {
+entry:
+  ret void
+}
+
+; CHECK: LLVM ERROR: unable to lower arguments: ptr (in function: test_args_nxv1f16)
+
+
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/vec-args.ll b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/vec-args.ll
new file mode 100644
index 00000000000000..4df0a8f48cc8d0
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/vec-args.ll
@@ -0,0 +1,909 @@
+; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+v,+experimental-zvfbfmin,+zvfh -global-isel -stop-after=irtranslator \
+; RUN:   -verify-machineinstrs < %s | FileCheck -check-prefixes=RV32 %s
+; RUN: llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfmin,+zvfh -global-isel -stop-after=irtranslator \
+; RUN:   -verify-machineinstrs < %s | FileCheck -check-prefixes=RV64 %s
+
+; ==========================================================================
+; ============================= Scalable Types =============================
+; ==========================================================================
+
+define void @test_args_nxv1i8(<vscale x 1 x i8> %a) {
+  ; RV32-LABEL: name: test_args_nxv1i8
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   liveins: $v8
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 1 x s8>) = COPY $v8
+  ; RV32-NEXT:   PseudoRET
+  ;
+  ; RV64-LABEL: name: test_args_nxv1i8
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   liveins: $v8
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 1 x s8>) = COPY $v8
+  ; RV64-NEXT:   PseudoRET
+entry:
+  ret void
+}
+
+define void @test_args_nxv2i8(<vscale x 2 x i8> %a) {
+  ; RV32-LABEL: name: test_args_nxv2i8
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   liveins: $v8
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v8
+  ; RV32-NEXT:   PseudoRET
+  ;
+  ; RV64-LABEL: name: test_args_nxv2i8
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   liveins: $v8
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v8
+  ; RV64-NEXT:   PseudoRET
+entry:
+  ret void
+}
+
+define void @test_args_nxv4i8(<vscale x 4 x i8> %a) {
+  ; RV32-LABEL: name: test_args_nxv4i8
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   liveins: $v8
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v8
+  ; RV32-NEXT:   PseudoRET
+  ;
+  ; RV64-LABEL: name: test_args_nxv4i8
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   liveins: $v8
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v8
+  ; RV64-NEXT:   PseudoRET
+entry:
+  ret void
+}
+
+define void @test_args_nxv8i8(<vscale x 8 x i8> %a) {
+  ; RV32-LABEL: name: test_args_nxv8i8
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   liveins: $v8
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v8
+  ; RV32-NEXT:   PseudoRET
+  ;
+  ; RV64-LABEL: name: test_args_nxv8i8
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   liveins: $v8
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v8
+  ; RV64-NEXT:   PseudoRET
+entry:
+  ret void
+}
+
+define void @test_args_nxv16i8(<vscale x 16 x i8> %a) {
+  ; RV32-LABEL: name: test_args_nxv16i8
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   liveins: $v8m2
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v8m2
+  ; RV32-NEXT:   PseudoRET
+  ;
+  ; RV64-LABEL: name: test_args_nxv16i8
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   liveins: $v8m2
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v8m2
+  ; RV64-NEXT:   PseudoRET
+entry:
+  ret void
+}
+
+define void @test_args_nxv32i8(<vscale x 32 x i8> %a) {
+  ; RV32-LABEL: name: test_args_nxv32i8
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   liveins: $v8m4
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v8m4
+  ; RV32-NEXT:   PseudoRET
+  ;
+  ; RV64-LABEL: name: test_args_nxv32i8
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   liveins: $v8m4
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v8m4
+  ; RV64-NEXT:   PseudoRET
+entry:
+  ret void
+}
+
+define void @test_args_nxv64i8(<vscale x 64 x i8> %a) {
+  ; RV32-LABEL: name: test_args_nxv64i8
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   liveins: $v8m8
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v8m8
+  ; RV32-NEXT:   PseudoRET
+  ;
+  ; RV64-LABEL: name: test_args_nxv64i8
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   liveins: $v8m8
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v8m8
+  ; RV64-NEXT:   PseudoRET
+entry:
+  ret void
+}
+
+define void @test_args_nxv1i16(<vscale x 1 x i16> %a) {
+  ; RV32-LABEL: name: test_args_nxv1i16
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   liveins: $v8
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 1 x s16>) = COPY $v8
+  ; RV32-NEXT:   PseudoRET
+  ;
+  ; RV64-LABEL: name: test_args_nxv1i16
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   liveins: $v8
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 1 x s16>) = COPY $v8
+  ; RV64-NEXT:   PseudoRET
+entry:
+  ret void
+}
+
+define void @test_args_nxv2i16(<vscale x 2 x i16> %a) {
+  ; RV32-LABEL: name: test_args_nxv2i16
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   liveins: $v8
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v8
+  ; RV32-NEXT:   PseudoRET
+  ;
+  ; RV64-LABEL: name: test_args_nxv2i16
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   liveins: $v8
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v8
+  ; RV64-NEXT:   PseudoRET
+entry:
+  ret void
+}
+
+define void @test_args_nxv4i16(<vscale x 4 x i16> %a) {
+  ; RV32-LABEL: name: test_args_nxv4i16
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   liveins: $v8
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v8
+  ; RV32-NEXT:   PseudoRET
+  ;
+  ; RV64-LABEL: name: test_args_nxv4i16
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   liveins: $v8
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v8
+  ; RV64-NEXT:   PseudoRET
+entry:
+  ret void
+}
+
+define void @test_args_nxv8i16(<vscale x 8 x i16> %a) {
+  ; RV32-LABEL: name: test_args_nxv8i16
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   liveins: $v8m2
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v8m2
+  ; RV32-NEXT:   PseudoRET
+  ;
+  ; RV64-LABEL: name: test_args_nxv8i16
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   liveins: $v8m2
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v8m2
+  ; RV64-NEXT:   PseudoRET
+entry:
+  ret void
+}
+
+define void @test_args_nxv16i16(<vscale x 16 x i16> %a) {
+  ; RV32-LABEL: name: test_args_nxv16i16
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   liveins: $v8m4
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v8m4
+  ; RV32-NEXT:   PseudoRET
+  ;
+  ; RV64-LABEL: name: test_args_nxv16i16
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   liveins: $v8m4
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v8m4
+  ; RV64-NEXT:   PseudoRET
+entry:
+  ret void
+}
+
+define void @test_args_nxv32i16(<vscale x 32 x i16> %a) {
+  ; RV32-LABEL: name: test_args_nxv32i16
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   liveins: $v8m8
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v8m8
+  ; RV32-NEXT:   PseudoRET
+  ;
+  ; RV64-LABEL: name: test_args_nxv32i16
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   liveins: $v8m8
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v8m8
+  ; RV64-NEXT:   PseudoRET
+entry:
+  ret void
+}
+
+define void @test_args_nxv1i32(<vscale x 1 x i32> %a) {
+  ; RV32-LABEL: name: test_args_nxv1i32
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   liveins: $v8
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 1 x s32>) = COPY $v8
+  ; RV32-NEXT:   PseudoRET
+  ;
+  ; RV64-LABEL: name: test_args_nxv1i32
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   liveins: $v8
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 1 x s32>) = COPY $v8
+  ; RV64-NEXT:   PseudoRET
+entry:
+  ret void
+}
+
+define void @test_args_nxv2i32(<vscale x 2 x i32> %a) {
+  ; RV32-LABEL: name: test_args_nxv2i32
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   liveins: $v8
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v8
+  ; RV32-NEXT:   PseudoRET
+  ;
+  ; RV64-LABEL: name: test_args_nxv2i32
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   liveins: $v8
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v8
+  ; RV64-NEXT:   PseudoRET
+entry:
+  ret void
+}
+
+define void @test_args_nxv4i32(<vscale x 4 x i32> %a) {
+  ; RV32-LABEL: name: test_args_nxv4i32
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   liveins: $v8m2
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v8m2
+  ; RV32-NEXT:   PseudoRET
+  ;
+  ; RV64-LABEL: name: test_args_nxv4i32
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   liveins: $v8m2
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v8m2
+  ; RV64-NEXT:   PseudoRET
+entry:
+  ret void
+}
+
+define void @test_args_nxv8i32(<vscale x 8 x i32> %a) {
+  ; RV32-LABEL: name: test_args_nxv8i32
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   liveins: $v8m4
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v8m4
+  ; RV32-NEXT:   PseudoRET
+  ;
+  ; RV64-LABEL: name: test_args_nxv8i32
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   liveins: $v8m4
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v8m4
+  ; RV64-NEXT:   PseudoRET
+entry:
+  ret void
+}
+
+define void @test_args_nxv16i32(<vscale x 16 x i32> %a) {
+  ; RV32-LABEL: name: test_args_nxv16i32
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   liveins: $v8m8
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v8m8
+  ; RV32-NEXT:   PseudoRET
+  ;
+  ; RV64-LABEL: name: test_args_nxv16i32
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   liveins: $v8m8
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v8m8
+  ; RV64-NEXT:   PseudoRET
+entry:
+  ret void
+}
+
+define void @test_args_nxv1i64(<vscale x 1 x i64> %a) {
+  ; RV32-LABEL: name: test_args_nxv1i64
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   liveins: $v8
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 1 x s64>) = COPY $v8
+  ; RV32-NEXT:   PseudoRET
+  ;
+  ; RV64-LABEL: name: test_args_nxv1i64
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   liveins: $v8
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 1 x s64>) = COPY $v8
+  ; RV64-NEXT:   PseudoRET
+entry:
+  ret void
+}
+
+define void @test_args_nxv2i64(<vscale x 2 x i64> %a) {
+  ; RV32-LABEL: name: test_args_nxv2i64
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   liveins: $v8m2
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v8m2
+  ; RV32-NEXT:   PseudoRET
+  ;
+  ; RV64-LABEL: name: test_args_nxv2i64
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   liveins: $v8m2
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v8m2
+  ; RV64-NEXT:   PseudoRET
+entry:
+  ret void
+}
+
+define void @test_args_nxv4i64(<vscale x 4 x i64> %a) {
+  ; RV32-LABEL: name: test_args_nxv4i64
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   liveins: $v8m4
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 4 x s64>) = COPY $v8m4
+  ; RV32-NEXT:   PseudoRET
+  ;
+  ; RV64-LABEL: name: test_args_nxv4i64
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   liveins: $v8m4
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 4 x s64>) = COPY $v8m4
+  ; RV64-NEXT:   PseudoRET
+entry:
+  ret void
+}
+
+define void @test_args_nxv8i64(<vscale x 8 x i64> %a) {
+  ; RV32-LABEL: name: test_args_nxv8i64
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   liveins: $v8m8
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 8 x s64>) = COPY $v8m8
+  ; RV32-NEXT:   PseudoRET
+  ;
+  ; RV64-LABEL: name: test_args_nxv8i64
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   liveins: $v8m8
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 8 x s64>) = COPY $v8m8
+  ; RV64-NEXT:   PseudoRET
+entry:
+  ret void
+}
+
+define void @test_args_nxv64i1(<vscale x 64 x i1> %a) {
+  ; RV32-LABEL: name: test_args_nxv64i1
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   liveins: $v8
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 64 x s1>) = COPY $v8
+  ; RV32-NEXT:   PseudoRET
+  ;
+  ; RV64-LABEL: name: test_args_nxv64i1
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   liveins: $v8
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 64 x s1>) = COPY $v8
+  ; RV64-NEXT:   PseudoRET
+entry:
+  ret void
+}
+
+define void @test_args_nxv32i1(<vscale x 32 x i1> %a) {
+  ; RV32-LABEL: name: test_args_nxv32i1
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   liveins: $v8
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 32 x s1>) = COPY $v8
+  ; RV32-NEXT:   PseudoRET
+  ;
+  ; RV64-LABEL: name: test_args_nxv32i1
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   liveins: $v8
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 32 x s1>) = COPY $v8
+  ; RV64-NEXT:   PseudoRET
+entry:
+  ret void
+}
+
+define void @test_args_nxv16i1(<vscale x 16 x i1> %a) {
+  ; RV32-LABEL: name: test_args_nxv16i1
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   liveins: $v8
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 16 x s1>) = COPY $v8
+  ; RV32-NEXT:   PseudoRET
+  ;
+  ; RV64-LABEL: name: test_args_nxv16i1
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   liveins: $v8
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 16 x s1>) = COPY $v8
+  ; RV64-NEXT:   PseudoRET
+entry:
+  ret void
+}
+
+define void @test_args_nxv8i1(<vscale x 8 x i1> %a) {
+  ; RV32-LABEL: name: test_args_nxv8i1
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   liveins: $v8
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 8 x s1>) = COPY $v8
+  ; RV32-NEXT:   PseudoRET
+  ;
+  ; RV64-LABEL: name: test_args_nxv8i1
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   liveins: $v8
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 8 x s1>) = COPY $v8
+  ; RV64-NEXT:   PseudoRET
+entry:
+  ret void
+}
+
+define void @test_args_nxv4i1(<vscale x 4 x i1> %a) {
+  ; RV32-LABEL: name: test_args_nxv4i1
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   liveins: $v8
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 4 x s1>) = COPY $v8
+  ; RV32-NEXT:   PseudoRET
+  ;
+  ; RV64-LABEL: name: test_args_nxv4i1
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   liveins: $v8
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 4 x s1>) = COPY $v8
+  ; RV64-NEXT:   PseudoRET
+entry:
+  ret void
+}
+
+define void @test_args_nxv2i1(<vscale x 2 x i1> %a) {
+  ; RV32-LABEL: name: test_args_nxv2i1
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   liveins: $v8
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 2 x s1>) = COPY $v8
+  ; RV32-NEXT:   PseudoRET
+  ;
+  ; RV64-LABEL: name: test_args_nxv2i1
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   liveins: $v8
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 2 x s1>) = COPY $v8
+  ; RV64-NEXT:   PseudoRET
+entry:
+  ret void
+}
+
+define void @test_args_nxv1i1(<vscale x 1 x i1> %a) {
+  ; RV32-LABEL: name: test_args_nxv1i1
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   liveins: $v8
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 1 x s1>) = COPY $v8
+  ; RV32-NEXT:   PseudoRET
+  ;
+  ; RV64-LABEL: name: test_args_nxv1i1
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   liveins: $v8
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 1 x s1>) = COPY $v8
+  ; RV64-NEXT:   PseudoRET
+entry:
+  ret void
+}
+
+define void @test_args_nxv1f32(<vscale x 1 x float> %a) {
+  ; RV32-LABEL: name: test_args_nxv1f32
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   liveins: $v8
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 1 x s32>) = COPY $v8
+  ; RV32-NEXT:   PseudoRET
+  ;
+  ; RV64-LABEL: name: test_args_nxv1f32
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   liveins: $v8
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 1 x s32>) = COPY $v8
+  ; RV64-NEXT:   PseudoRET
+entry:
+  ret void
+}
+
+define void @test_args_nxv2f32(<vscale x 2 x float> %a) {
+  ; RV32-LABEL: name: test_args_nxv2f32
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   liveins: $v8
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v8
+  ; RV32-NEXT:   PseudoRET
+  ;
+  ; RV64-LABEL: name: test_args_nxv2f32
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   liveins: $v8
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v8
+  ; RV64-NEXT:   PseudoRET
+entry:
+  ret void
+}
+
+define void @test_args_nxv4f32(<vscale x 4 x float> %a) {
+  ; RV32-LABEL: name: test_args_nxv4f32
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   liveins: $v8m2
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v8m2
+  ; RV32-NEXT:   PseudoRET
+  ;
+  ; RV64-LABEL: name: test_args_nxv4f32
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   liveins: $v8m2
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v8m2
+  ; RV64-NEXT:   PseudoRET
+entry:
+  ret void
+}
+
+define void @test_args_nxv8f32(<vscale x 8 x float> %a) {
+  ; RV32-LABEL: name: test_args_nxv8f32
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   liveins: $v8m4
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v8m4
+  ; RV32-NEXT:   PseudoRET
+  ;
+  ; RV64-LABEL: name: test_args_nxv8f32
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   liveins: $v8m4
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v8m4
+  ; RV64-NEXT:   PseudoRET
+entry:
+  ret void
+}
+
+define void @test_args_nxv16f32(<vscale x 16 x float> %a) {
+  ; RV32-LABEL: name: test_args_nxv16f32
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   liveins: $v8m8
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v8m8
+  ; RV32-NEXT:   PseudoRET
+  ;
+  ; RV64-LABEL: name: test_args_nxv16f32
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   liveins: $v8m8
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v8m8
+  ; RV64-NEXT:   PseudoRET
+entry:
+  ret void
+}
+
+define void @test_args_nxv1f64(<vscale x 1 x double> %a) {
+  ; RV32-LABEL: name: test_args_nxv1f64
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   liveins: $v8
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 1 x s64>) = COPY $v8
+  ; RV32-NEXT:   PseudoRET
+  ;
+  ; RV64-LABEL: name: test_args_nxv1f64
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   liveins: $v8
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 1 x s64>) = COPY $v8
+  ; RV64-NEXT:   PseudoRET
+entry:
+  ret void
+}
+
+define void @test_args_nxv2f64(<vscale x 2 x double> %a) {
+  ; RV32-LABEL: name: test_args_nxv2f64
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   liveins: $v8m2
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v8m2
+  ; RV32-NEXT:   PseudoRET
+  ;
+  ; RV64-LABEL: name: test_args_nxv2f64
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   liveins: $v8m2
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v8m2
+  ; RV64-NEXT:   PseudoRET
+entry:
+  ret void
+}
+
+define void @test_args_nxv4f64(<vscale x 4 x double> %a) {
+  ; RV32-LABEL: name: test_args_nxv4f64
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   liveins: $v8m4
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 4 x s64>) = COPY $v8m4
+  ; RV32-NEXT:   PseudoRET
+  ;
+  ; RV64-LABEL: name: test_args_nxv4f64
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   liveins: $v8m4
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 4 x s64>) = COPY $v8m4
+  ; RV64-NEXT:   PseudoRET
+entry:
+  ret void
+}
+
+define void @test_args_nxv8f64(<vscale x 8 x double> %a) {
+  ; RV32-LABEL: name: test_args_nxv8f64
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   liveins: $v8m8
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 8 x s64>) = COPY $v8m8
+  ; RV32-NEXT:   PseudoRET
+  ;
+  ; RV64-LABEL: name: test_args_nxv8f64
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   liveins: $v8m8
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 8 x s64>) = COPY $v8m8
+  ; RV64-NEXT:   PseudoRET
+entry:
+  ret void
+}
+
+define void @test_args_nxv1f16(<vscale x 1 x half> %a) {
+  ; RV32-LABEL: name: test_args_nxv1f16
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   liveins: $v8
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 1 x s16>) = COPY $v8
+  ; RV32-NEXT:   PseudoRET
+  ;
+  ; RV64-LABEL: name: test_args_nxv1f16
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   liveins: $v8
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 1 x s16>) = COPY $v8
+  ; RV64-NEXT:   PseudoRET
+entry:
+  ret void
+}
+
+define void @test_args_nxv2f16(<vscale x 2 x half> %a) {
+  ; RV32-LABEL: name: test_args_nxv2f16
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   liveins: $v8
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v8
+  ; RV32-NEXT:   PseudoRET
+  ;
+  ; RV64-LABEL: name: test_args_nxv2f16
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   liveins: $v8
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v8
+  ; RV64-NEXT:   PseudoRET
+entry:
+  ret void
+}
+
+define void @test_args_nxv4f16(<vscale x 4 x half> %a) {
+  ; RV32-LABEL: name: test_args_nxv4f16
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   liveins: $v8
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v8
+  ; RV32-NEXT:   PseudoRET
+  ;
+  ; RV64-LABEL: name: test_args_nxv4f16
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   liveins: $v8
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v8
+  ; RV64-NEXT:   PseudoRET
+entry:
+  ret void
+}
+
+define void @test_args_nxv8f16(<vscale x 8 x half> %a) {
+  ; RV32-LABEL: name: test_args_nxv8f16
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   liveins: $v8m2
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v8m2
+  ; RV32-NEXT:   PseudoRET
+  ;
+  ; RV64-LABEL: name: test_args_nxv8f16
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   liveins: $v8m2
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v8m2
+  ; RV64-NEXT:   PseudoRET
+entry:
+  ret void
+}
+
+define void @test_args_nxv16f16(<vscale x 16 x half> %a) {
+  ; RV32-LABEL: name: test_args_nxv16f16
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   liveins: $v8m4
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v8m4
+  ; RV32-NEXT:   PseudoRET
+  ;
+  ; RV64-LABEL: name: test_args_nxv16f16
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   liveins: $v8m4
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v8m4
+  ; RV64-NEXT:   PseudoRET
+entry:
+  ret void
+}
+
+define void @test_args_nxv32f16(<vscale x 32 x half> %a) {
+  ; RV32-LABEL: name: test_args_nxv32f16
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   liveins: $v8m8
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v8m8
+  ; RV32-NEXT:   PseudoRET
+  ;
+  ; RV64-LABEL: name: test_args_nxv32f16
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   liveins: $v8m8
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v8m8
+  ; RV64-NEXT:   PseudoRET
+entry:
+  ret void
+}
+
+define void @test_args_nxv1b16(<vscale x 1 x bfloat> %a) {
+  ; RV32-LABEL: name: test_args_nxv1b16
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   liveins: $v8
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 1 x s16>) = COPY $v8
+  ; RV32-NEXT:   PseudoRET
+  ;
+  ; RV64-LABEL: name: test_args_nxv1b16
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   liveins: $v8
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 1 x s16>) = COPY $v8
+  ; RV64-NEXT:   PseudoRET
+entry:
+  ret void
+}
+
+define void @test_args_nxv2b16(<vscale x 2 x bfloat> %a) {
+  ; RV32-LABEL: name: test_args_nxv2b16
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   liveins: $v8
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v8
+  ; RV32-NEXT:   PseudoRET
+  ;
+  ; RV64-LABEL: name: test_args_nxv2b16
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   liveins: $v8
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v8
+  ; RV64-NEXT:   PseudoRET
+entry:
+  ret void
+}
+
+define void @test_args_nxv4b16(<vscale x 4 x bfloat> %a) {
+  ; RV32-LABEL: name: test_args_nxv4b16
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   liveins: $v8
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v8
+  ; RV32-NEXT:   PseudoRET
+  ;
+  ; RV64-LABEL: name: test_args_nxv4b16
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   liveins: $v8
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v8
+  ; RV64-NEXT:   PseudoRET
+entry:
+  ret void
+}
+
+define void @test_args_nxv8b16(<vscale x 8 x bfloat> %a) {
+  ; RV32-LABEL: name: test_args_nxv8b16
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   liveins: $v8m2
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v8m2
+  ; RV32-NEXT:   PseudoRET
+  ;
+  ; RV64-LABEL: name: test_args_nxv8b16
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   liveins: $v8m2
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v8m2
+  ; RV64-NEXT:   PseudoRET
+entry:
+  ret void
+}
+
+define void @test_args_nxv16b16(<vscale x 16 x bfloat> %a) {
+  ; RV32-LABEL: name: test_args_nxv16b16
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   liveins: $v8m4
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v8m4
+  ; RV32-NEXT:   PseudoRET
+  ;
+  ; RV64-LABEL: name: test_args_nxv16b16
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   liveins: $v8m4
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v8m4
+  ; RV64-NEXT:   PseudoRET
+entry:
+  ret void
+}
+
+define void @test_args_nxv32b16(<vscale x 32 x bfloat> %a) {
+  ; RV32-LABEL: name: test_args_nxv32b16
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   liveins: $v8m8
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v8m8
+  ; RV32-NEXT:   PseudoRET
+  ;
+  ; RV64-LABEL: name: test_args_nxv32b16
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   liveins: $v8m8
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v8m8
+  ; RV64-NEXT:   PseudoRET
+entry:
+  ret void
+}