From 6aaaaf3d01469a11e5b36f8cbe60171d565bde6d Mon Sep 17 00:00:00 2001
From: Zhang Yi <18994118902@163.com>
Date: Sun, 10 Mar 2024 21:26:18 -0700
Subject: [PATCH] [Clang][XTHeadVector] Define the unmasked definition of
 vsmul

---
 .../clang/Basic/riscv_vector_xtheadv.td       | 29 +++++++++++++
 .../Basic/riscv_vector_xtheadv_wrappers.td    | 42 +++++++++++++++++++
 2 files changed, 71 insertions(+)

diff --git a/clang/include/clang/Basic/riscv_vector_xtheadv.td b/clang/include/clang/Basic/riscv_vector_xtheadv.td
index 64c87b27181de2..9fecb953b35863 100644
--- a/clang/include/clang/Basic/riscv_vector_xtheadv.td
+++ b/clang/include/clang/Basic/riscv_vector_xtheadv.td
@@ -1101,6 +1101,35 @@ let ManualCodegen = [{
 // 13.2. Vector Single-Width Averaging Add and Subtract
 defm th_vaadd : RVVSignedBinBuiltinSetRoundingMode;
 defm th_vasub : RVVSignedBinBuiltinSetRoundingMode;
 }
+
+// 13.3. Vector Single-Width Fractional Multiply with Rounding and Saturation Operations
+
+let IntrinsicTypes = [-1,0,1], ManualCodegen = [{
+  {
+    // LLVM intrinsic operand layout
+    // Unmasked: (passthru, op0, op1, vl)
+    // Masked:   not yet supported -- TODO: confirm layout against other
+    //           masked XTHeadVector builtins when implementing
+
+    SmallVector<llvm::Value *, 4> Operands;
+    if (IsMasked) {
+      // TODO: add support for masked vsmul
+    }
+    else {
+      // Passthru is poison: no tail-undisturbed semantics for the
+      // unmasked form.
+      Operands.push_back(llvm::PoisonValue::get(ResultType));
+      Operands.push_back(Ops[0]);
+      Operands.push_back(Ops[1]);
+      Operands.push_back(Ops[2]);
+      IntrinsicTypes = {Ops[0]->getType(), Ops[1]->getType(), Ops[2]->getType()};
+      llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
+      return Builder.CreateCall(F, Operands, "");
+    }
+  }
+}] in {
+
+  defm th_vsmul : RVVOutOp1BuiltinSet<"th_vsmul", "csil",
+                                      [["vv", "v", "vvv"],
+                                       ["vx", "v", "vve"]]>;
+}
 
 include "riscv_vector_xtheadv_wrappers.td"

diff --git a/clang/include/clang/Basic/riscv_vector_xtheadv_wrappers.td b/clang/include/clang/Basic/riscv_vector_xtheadv_wrappers.td
index 06dda98c023c43..527926d70f7383 100644
--- a/clang/include/clang/Basic/riscv_vector_xtheadv_wrappers.td
+++ b/clang/include/clang/Basic/riscv_vector_xtheadv_wrappers.td
@@ -1843,3 +1843,45 @@ let HeaderCode =
 
 }] in
 def th_single_width_averaging_add_and_subtract_wrapper_macros: RVVHeader;
+
+// 13.3. Vector Single-Width Fractional Multiply with Rounding and Saturation
+
+let HeaderCode =
+[{
+
+#define __riscv_vsmul_vv_i8m1(op1_v, op2_v, vl) __riscv_th_vsmul_vv_i8m1(op1_v, op2_v, vl)
+#define __riscv_vsmul_vv_i8m2(op1_v, op2_v, vl) __riscv_th_vsmul_vv_i8m2(op1_v, op2_v, vl)
+#define __riscv_vsmul_vv_i8m4(op1_v, op2_v, vl) __riscv_th_vsmul_vv_i8m4(op1_v, op2_v, vl)
+#define __riscv_vsmul_vv_i8m8(op1_v, op2_v, vl) __riscv_th_vsmul_vv_i8m8(op1_v, op2_v, vl)
+#define __riscv_vsmul_vv_i16m1(op1_v, op2_v, vl) __riscv_th_vsmul_vv_i16m1(op1_v, op2_v, vl)
+#define __riscv_vsmul_vv_i16m2(op1_v, op2_v, vl) __riscv_th_vsmul_vv_i16m2(op1_v, op2_v, vl)
+#define __riscv_vsmul_vv_i16m4(op1_v, op2_v, vl) __riscv_th_vsmul_vv_i16m4(op1_v, op2_v, vl)
+#define __riscv_vsmul_vv_i16m8(op1_v, op2_v, vl) __riscv_th_vsmul_vv_i16m8(op1_v, op2_v, vl)
+#define __riscv_vsmul_vv_i32m1(op1_v, op2_v, vl) __riscv_th_vsmul_vv_i32m1(op1_v, op2_v, vl)
+#define __riscv_vsmul_vv_i32m2(op1_v, op2_v, vl) __riscv_th_vsmul_vv_i32m2(op1_v, op2_v, vl)
+#define __riscv_vsmul_vv_i32m4(op1_v, op2_v, vl) __riscv_th_vsmul_vv_i32m4(op1_v, op2_v, vl)
+#define __riscv_vsmul_vv_i32m8(op1_v, op2_v, vl) __riscv_th_vsmul_vv_i32m8(op1_v, op2_v, vl)
+#define __riscv_vsmul_vv_i64m1(op1_v, op2_v, vl) __riscv_th_vsmul_vv_i64m1(op1_v, op2_v, vl)
+#define __riscv_vsmul_vv_i64m2(op1_v, op2_v, vl) __riscv_th_vsmul_vv_i64m2(op1_v, op2_v, vl)
+#define __riscv_vsmul_vv_i64m4(op1_v, op2_v, vl) __riscv_th_vsmul_vv_i64m4(op1_v, op2_v, vl)
+#define __riscv_vsmul_vv_i64m8(op1_v, op2_v, vl) __riscv_th_vsmul_vv_i64m8(op1_v, op2_v, vl)
+
+#define __riscv_vsmul_vx_i8m1(op1_v, op2_x, vl) __riscv_th_vsmul_vx_i8m1(op1_v, op2_x, vl)
+#define __riscv_vsmul_vx_i8m2(op1_v, op2_x, vl) __riscv_th_vsmul_vx_i8m2(op1_v, op2_x, vl)
+#define __riscv_vsmul_vx_i8m4(op1_v, op2_x, vl) __riscv_th_vsmul_vx_i8m4(op1_v, op2_x, vl)
+#define __riscv_vsmul_vx_i8m8(op1_v, op2_x, vl) __riscv_th_vsmul_vx_i8m8(op1_v, op2_x, vl)
+#define __riscv_vsmul_vx_i16m1(op1_v, op2_x, vl) __riscv_th_vsmul_vx_i16m1(op1_v, op2_x, vl)
+#define __riscv_vsmul_vx_i16m2(op1_v, op2_x, vl) __riscv_th_vsmul_vx_i16m2(op1_v, op2_x, vl)
+#define __riscv_vsmul_vx_i16m4(op1_v, op2_x, vl) __riscv_th_vsmul_vx_i16m4(op1_v, op2_x, vl)
+#define __riscv_vsmul_vx_i16m8(op1_v, op2_x, vl) __riscv_th_vsmul_vx_i16m8(op1_v, op2_x, vl)
+#define __riscv_vsmul_vx_i32m1(op1_v, op2_x, vl) __riscv_th_vsmul_vx_i32m1(op1_v, op2_x, vl)
+#define __riscv_vsmul_vx_i32m2(op1_v, op2_x, vl) __riscv_th_vsmul_vx_i32m2(op1_v, op2_x, vl)
+#define __riscv_vsmul_vx_i32m4(op1_v, op2_x, vl) __riscv_th_vsmul_vx_i32m4(op1_v, op2_x, vl)
+#define __riscv_vsmul_vx_i32m8(op1_v, op2_x, vl) __riscv_th_vsmul_vx_i32m8(op1_v, op2_x, vl)
+#define __riscv_vsmul_vx_i64m1(op1_v, op2_x, vl) __riscv_th_vsmul_vx_i64m1(op1_v, op2_x, vl)
+#define __riscv_vsmul_vx_i64m2(op1_v, op2_x, vl) __riscv_th_vsmul_vx_i64m2(op1_v, op2_x, vl)
+#define __riscv_vsmul_vx_i64m4(op1_v, op2_x, vl) __riscv_th_vsmul_vx_i64m4(op1_v, op2_x, vl)
+#define __riscv_vsmul_vx_i64m8(op1_v, op2_x, vl) __riscv_th_vsmul_vx_i64m8(op1_v, op2_x, vl)
+
+}] in
+def th_single_width_fractional_multiply_with_rounding_and_saturation_wrapper_macros: RVVHeader;