[LLVM+Clang][XTHeadVector] Implement intrinsics for `vaadd` and `vasub` (llvm#76)

* [LLVM][XTHeadVector] Define vaadd/vasub

* [LLVM][XTHeadVector] Add test cases

* [NFC][XTHeadVector] Update README

* [LLVM][XTHeadVector] Support rounding mode for vaadd/vasub

* [Clang][XTHeadVector] Define `vaadd/vasub`

* [Clang][XTHeadVector] Add test cases for `vaadd/vasub`
YanWQ-monad authored Mar 8, 2024
1 parent 5a40acc commit acff917
Showing 11 changed files with 8,032 additions and 1 deletion.
2 changes: 2 additions & 0 deletions README.md
@@ -52,6 +52,8 @@ Any feature not listed below but present in the specification should be consider
- (Done) `12.12. Vector Single-Width Integer Multiply-Add Instructions`
- (Done) `12.13. Vector Widening Integer Multiply-Add Instructions`
- (Done) `12.14. Vector Integer Merge and Move Instructions`
- (WIP) `13. Vector Fixed-Point Arithmetic Instructions`
- (Done) `13.2. Vector Single-Width Averaging Add and Subtract`
- (Done) `13.3. Vector Single-Width Fractional Multiply with Rounding and Saturation`
- (WIP) Clang intrinsics related to the `XTHeadVector` extension:
- (WIP) `6. Configuration-Setting and Utility`
61 changes: 61 additions & 0 deletions clang/include/clang/Basic/riscv_vector_xtheadv.td
@@ -66,6 +66,11 @@ multiclass RVVSignedBinBuiltinSet
[["vv", "v", "vvv"],
["vx", "v", "vve"]]>;

multiclass RVVSignedBinBuiltinSetRoundingMode
: RVVOutOp1BuiltinSet<NAME, "csil",
[["vv", "v", "vvvu"],
["vx", "v", "vveu"]]>;

multiclass RVVUnsignedBinBuiltinSet
: RVVOutOp1BuiltinSet<NAME, "csil",
[["vv", "Uv", "UvUvUv"],
@@ -1042,4 +1047,60 @@ let UnMaskedPolicyScheme = HasPassthruOperand in {

// 12.15. Vector Integer Move Operations

// 13. Vector Fixed-Point Arithmetic Instructions
let HeaderCode =
[{
enum __RISCV_VXRM {
__RISCV_VXRM_RNU = 0,
__RISCV_VXRM_RNE = 1,
__RISCV_VXRM_RDN = 2,
__RISCV_VXRM_ROD = 3,
};
}] in
def vxrm_enum : RVVHeader;
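
The four enumerators mirror the fixed-point rounding modes of the RISC-V `vxrm` CSR: RNU (round-to-nearest-up), RNE (round-to-nearest-even), RDN (round-down, i.e. truncate), and ROD (round-to-odd). A minimal C sketch of how a caller passes one of them as the `rm` argument of the generated builtins; the `riscv_vector.h` include and the `vint32m1_t`/`size_t` types follow the usual RVV conventions and are assumed here:

#include <riscv_vector.h>   // assumed to expose the th_ builtins and the RVV types

vint32m1_t average(vint32m1_t a, vint32m1_t b, size_t vl) {
  // Averaging add with round-to-nearest-up: roughly (a + b + 1) >> 1 per element.
  return __riscv_th_vaadd_vv_i32m1(a, b, __RISCV_VXRM_RNU, vl);
}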

let ManualCodegen = [{
{
// LLVM intrinsic
// Unmasked: (passthru, op0, op1, round_mode, vl)
// Masked: (passthru, vector_in, vector_in/scalar_in, mask, vxrm, vl, policy)

SmallVector<llvm::Value*, 7> Operands;
bool HasMaskedOff = !(
(IsMasked && (PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA)) ||
(!IsMasked && PolicyAttrs & RVV_VTA));
unsigned Offset = IsMasked ?
(HasMaskedOff ? 2 : 1) : (HasMaskedOff ? 1 : 0);

if (!HasMaskedOff)
Operands.push_back(llvm::PoisonValue::get(ResultType));
else
Operands.push_back(Ops[IsMasked ? 1 : 0]);

Operands.push_back(Ops[Offset]); // op0
Operands.push_back(Ops[Offset + 1]); // op1

if (IsMasked)
Operands.push_back(Ops[0]); // mask

Operands.push_back(Ops[Offset + 2]); // vxrm
Operands.push_back(Ops[Offset + 3]); // vl

if (IsMasked) {
// TODO: no policy in LLVM side for masked intrinsics.
// Operands.push_back(ConstantInt::get(Ops.back()->getType(), PolicyAttrs));
IntrinsicTypes = {ResultType, Ops[Offset + 1]->getType(), Ops[Offset + 3]->getType()};
} else {
IntrinsicTypes = {ResultType, Ops[Offset + 1]->getType(), Ops.back()->getType()};
}

llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
return Builder.CreateCall(F, Operands, "");
}
}] in {
// 13.2. Vector Single-Width Averaging Add and Subtract
defm th_vaadd : RVVSignedBinBuiltinSetRoundingMode;
defm th_vasub : RVVSignedBinBuiltinSetRoundingMode;
}
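
As a rough illustration of the operand shuffling above (a sketch only; the exact passthru and policy handling depends on the builtin's policy attributes): for the plain unmasked form the C-level arguments `(op1, op2, rm, vl)` are forwarded to the LLVM intrinsic as `(passthru, op0, op1, vxrm, vl)`, while for the masked `_m` form the leading mask argument is moved after the data operands:

// Hypothetical caller; the comments restate the operand order the ManualCodegen
// block above builds before emitting the intrinsic call.
vint32m1_t demo(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl) {
  // Unmasked: lowered roughly as (passthru, op1, op2, vxrm, vl).
  vint32m1_t x = __riscv_th_vaadd_vv_i32m1(op1, op2, __RISCV_VXRM_RNE, vl);
  // Masked: the mask goes after the data operands,
  // roughly (passthru, op1, op2, mask, vxrm, vl).
  vint32m1_t y = __riscv_th_vasub_vv_i32m1_m(mask, op1, op2, __RISCV_VXRM_RDN, vl);
  return __riscv_th_vaadd_vv_i32m1(x, y, __RISCV_VXRM_RNU, vl);
}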

include "riscv_vector_xtheadv_wrappers.td"
139 changes: 139 additions & 0 deletions clang/include/clang/Basic/riscv_vector_xtheadv_wrappers.td
@@ -1704,3 +1704,142 @@ def th_narrowing_integer_right_shift_wrapper_macros: RVVHeader;

// 12.15. Vector Integer Move Operations

// 13.2. Vector Single-Width Averaging Add and Subtract

let HeaderCode =
[{
// Vector Single-Width Averaging Add and Subtract
#define __riscv_vaadd_vv_i8m1(op1, op2, rm, vl) __riscv_th_vaadd_vv_i8m1(op1, op2, rm, vl)
#define __riscv_vaadd_vx_i8m1(op1, op2, rm, vl) __riscv_th_vaadd_vx_i8m1(op1, op2, rm, vl)
#define __riscv_vaadd_vv_i8m2(op1, op2, rm, vl) __riscv_th_vaadd_vv_i8m2(op1, op2, rm, vl)
#define __riscv_vaadd_vx_i8m2(op1, op2, rm, vl) __riscv_th_vaadd_vx_i8m2(op1, op2, rm, vl)
#define __riscv_vaadd_vv_i8m4(op1, op2, rm, vl) __riscv_th_vaadd_vv_i8m4(op1, op2, rm, vl)
#define __riscv_vaadd_vx_i8m4(op1, op2, rm, vl) __riscv_th_vaadd_vx_i8m4(op1, op2, rm, vl)
#define __riscv_vaadd_vv_i8m8(op1, op2, rm, vl) __riscv_th_vaadd_vv_i8m8(op1, op2, rm, vl)
#define __riscv_vaadd_vx_i8m8(op1, op2, rm, vl) __riscv_th_vaadd_vx_i8m8(op1, op2, rm, vl)
#define __riscv_vaadd_vv_i16m1(op1, op2, rm, vl) __riscv_th_vaadd_vv_i16m1(op1, op2, rm, vl)
#define __riscv_vaadd_vx_i16m1(op1, op2, rm, vl) __riscv_th_vaadd_vx_i16m1(op1, op2, rm, vl)
#define __riscv_vaadd_vv_i16m2(op1, op2, rm, vl) __riscv_th_vaadd_vv_i16m2(op1, op2, rm, vl)
#define __riscv_vaadd_vx_i16m2(op1, op2, rm, vl) __riscv_th_vaadd_vx_i16m2(op1, op2, rm, vl)
#define __riscv_vaadd_vv_i16m4(op1, op2, rm, vl) __riscv_th_vaadd_vv_i16m4(op1, op2, rm, vl)
#define __riscv_vaadd_vx_i16m4(op1, op2, rm, vl) __riscv_th_vaadd_vx_i16m4(op1, op2, rm, vl)
#define __riscv_vaadd_vv_i16m8(op1, op2, rm, vl) __riscv_th_vaadd_vv_i16m8(op1, op2, rm, vl)
#define __riscv_vaadd_vx_i16m8(op1, op2, rm, vl) __riscv_th_vaadd_vx_i16m8(op1, op2, rm, vl)
#define __riscv_vaadd_vv_i32m1(op1, op2, rm, vl) __riscv_th_vaadd_vv_i32m1(op1, op2, rm, vl)
#define __riscv_vaadd_vx_i32m1(op1, op2, rm, vl) __riscv_th_vaadd_vx_i32m1(op1, op2, rm, vl)
#define __riscv_vaadd_vv_i32m2(op1, op2, rm, vl) __riscv_th_vaadd_vv_i32m2(op1, op2, rm, vl)
#define __riscv_vaadd_vx_i32m2(op1, op2, rm, vl) __riscv_th_vaadd_vx_i32m2(op1, op2, rm, vl)
#define __riscv_vaadd_vv_i32m4(op1, op2, rm, vl) __riscv_th_vaadd_vv_i32m4(op1, op2, rm, vl)
#define __riscv_vaadd_vx_i32m4(op1, op2, rm, vl) __riscv_th_vaadd_vx_i32m4(op1, op2, rm, vl)
#define __riscv_vaadd_vv_i32m8(op1, op2, rm, vl) __riscv_th_vaadd_vv_i32m8(op1, op2, rm, vl)
#define __riscv_vaadd_vx_i32m8(op1, op2, rm, vl) __riscv_th_vaadd_vx_i32m8(op1, op2, rm, vl)
#define __riscv_vaadd_vv_i64m1(op1, op2, rm, vl) __riscv_th_vaadd_vv_i64m1(op1, op2, rm, vl)
#define __riscv_vaadd_vx_i64m1(op1, op2, rm, vl) __riscv_th_vaadd_vx_i64m1(op1, op2, rm, vl)
#define __riscv_vaadd_vv_i64m2(op1, op2, rm, vl) __riscv_th_vaadd_vv_i64m2(op1, op2, rm, vl)
#define __riscv_vaadd_vx_i64m2(op1, op2, rm, vl) __riscv_th_vaadd_vx_i64m2(op1, op2, rm, vl)
#define __riscv_vaadd_vv_i64m4(op1, op2, rm, vl) __riscv_th_vaadd_vv_i64m4(op1, op2, rm, vl)
#define __riscv_vaadd_vx_i64m4(op1, op2, rm, vl) __riscv_th_vaadd_vx_i64m4(op1, op2, rm, vl)
#define __riscv_vaadd_vv_i64m8(op1, op2, rm, vl) __riscv_th_vaadd_vv_i64m8(op1, op2, rm, vl)
#define __riscv_vaadd_vx_i64m8(op1, op2, rm, vl) __riscv_th_vaadd_vx_i64m8(op1, op2, rm, vl)

#define __riscv_vaadd_vv_i8m1_m(mask, op1, op2, rm, vl) __riscv_th_vaadd_vv_i8m1_m(mask, op1, op2, rm, vl)
#define __riscv_vaadd_vx_i8m1_m(mask, op1, op2, rm, vl) __riscv_th_vaadd_vx_i8m1_m(mask, op1, op2, rm, vl)
#define __riscv_vaadd_vv_i8m2_m(mask, op1, op2, rm, vl) __riscv_th_vaadd_vv_i8m2_m(mask, op1, op2, rm, vl)
#define __riscv_vaadd_vx_i8m2_m(mask, op1, op2, rm, vl) __riscv_th_vaadd_vx_i8m2_m(mask, op1, op2, rm, vl)
#define __riscv_vaadd_vv_i8m4_m(mask, op1, op2, rm, vl) __riscv_th_vaadd_vv_i8m4_m(mask, op1, op2, rm, vl)
#define __riscv_vaadd_vx_i8m4_m(mask, op1, op2, rm, vl) __riscv_th_vaadd_vx_i8m4_m(mask, op1, op2, rm, vl)
#define __riscv_vaadd_vv_i8m8_m(mask, op1, op2, rm, vl) __riscv_th_vaadd_vv_i8m8_m(mask, op1, op2, rm, vl)
#define __riscv_vaadd_vx_i8m8_m(mask, op1, op2, rm, vl) __riscv_th_vaadd_vx_i8m8_m(mask, op1, op2, rm, vl)
#define __riscv_vaadd_vv_i16m1_m(mask, op1, op2, rm, vl) __riscv_th_vaadd_vv_i16m1_m(mask, op1, op2, rm, vl)
#define __riscv_vaadd_vx_i16m1_m(mask, op1, op2, rm, vl) __riscv_th_vaadd_vx_i16m1_m(mask, op1, op2, rm, vl)
#define __riscv_vaadd_vv_i16m2_m(mask, op1, op2, rm, vl) __riscv_th_vaadd_vv_i16m2_m(mask, op1, op2, rm, vl)
#define __riscv_vaadd_vx_i16m2_m(mask, op1, op2, rm, vl) __riscv_th_vaadd_vx_i16m2_m(mask, op1, op2, rm, vl)
#define __riscv_vaadd_vv_i16m4_m(mask, op1, op2, rm, vl) __riscv_th_vaadd_vv_i16m4_m(mask, op1, op2, rm, vl)
#define __riscv_vaadd_vx_i16m4_m(mask, op1, op2, rm, vl) __riscv_th_vaadd_vx_i16m4_m(mask, op1, op2, rm, vl)
#define __riscv_vaadd_vv_i16m8_m(mask, op1, op2, rm, vl) __riscv_th_vaadd_vv_i16m8_m(mask, op1, op2, rm, vl)
#define __riscv_vaadd_vx_i16m8_m(mask, op1, op2, rm, vl) __riscv_th_vaadd_vx_i16m8_m(mask, op1, op2, rm, vl)
#define __riscv_vaadd_vv_i32m1_m(mask, op1, op2, rm, vl) __riscv_th_vaadd_vv_i32m1_m(mask, op1, op2, rm, vl)
#define __riscv_vaadd_vx_i32m1_m(mask, op1, op2, rm, vl) __riscv_th_vaadd_vx_i32m1_m(mask, op1, op2, rm, vl)
#define __riscv_vaadd_vv_i32m2_m(mask, op1, op2, rm, vl) __riscv_th_vaadd_vv_i32m2_m(mask, op1, op2, rm, vl)
#define __riscv_vaadd_vx_i32m2_m(mask, op1, op2, rm, vl) __riscv_th_vaadd_vx_i32m2_m(mask, op1, op2, rm, vl)
#define __riscv_vaadd_vv_i32m4_m(mask, op1, op2, rm, vl) __riscv_th_vaadd_vv_i32m4_m(mask, op1, op2, rm, vl)
#define __riscv_vaadd_vx_i32m4_m(mask, op1, op2, rm, vl) __riscv_th_vaadd_vx_i32m4_m(mask, op1, op2, rm, vl)
#define __riscv_vaadd_vv_i32m8_m(mask, op1, op2, rm, vl) __riscv_th_vaadd_vv_i32m8_m(mask, op1, op2, rm, vl)
#define __riscv_vaadd_vx_i32m8_m(mask, op1, op2, rm, vl) __riscv_th_vaadd_vx_i32m8_m(mask, op1, op2, rm, vl)
#define __riscv_vaadd_vv_i64m1_m(mask, op1, op2, rm, vl) __riscv_th_vaadd_vv_i64m1_m(mask, op1, op2, rm, vl)
#define __riscv_vaadd_vx_i64m1_m(mask, op1, op2, rm, vl) __riscv_th_vaadd_vx_i64m1_m(mask, op1, op2, rm, vl)
#define __riscv_vaadd_vv_i64m2_m(mask, op1, op2, rm, vl) __riscv_th_vaadd_vv_i64m2_m(mask, op1, op2, rm, vl)
#define __riscv_vaadd_vx_i64m2_m(mask, op1, op2, rm, vl) __riscv_th_vaadd_vx_i64m2_m(mask, op1, op2, rm, vl)
#define __riscv_vaadd_vv_i64m4_m(mask, op1, op2, rm, vl) __riscv_th_vaadd_vv_i64m4_m(mask, op1, op2, rm, vl)
#define __riscv_vaadd_vx_i64m4_m(mask, op1, op2, rm, vl) __riscv_th_vaadd_vx_i64m4_m(mask, op1, op2, rm, vl)
#define __riscv_vaadd_vv_i64m8_m(mask, op1, op2, rm, vl) __riscv_th_vaadd_vv_i64m8_m(mask, op1, op2, rm, vl)
#define __riscv_vaadd_vx_i64m8_m(mask, op1, op2, rm, vl) __riscv_th_vaadd_vx_i64m8_m(mask, op1, op2, rm, vl)

#define __riscv_vasub_vv_i8m1(op1, op2, rm, vl) __riscv_th_vasub_vv_i8m1(op1, op2, rm, vl)
#define __riscv_vasub_vx_i8m1(op1, op2, rm, vl) __riscv_th_vasub_vx_i8m1(op1, op2, rm, vl)
#define __riscv_vasub_vv_i8m2(op1, op2, rm, vl) __riscv_th_vasub_vv_i8m2(op1, op2, rm, vl)
#define __riscv_vasub_vx_i8m2(op1, op2, rm, vl) __riscv_th_vasub_vx_i8m2(op1, op2, rm, vl)
#define __riscv_vasub_vv_i8m4(op1, op2, rm, vl) __riscv_th_vasub_vv_i8m4(op1, op2, rm, vl)
#define __riscv_vasub_vx_i8m4(op1, op2, rm, vl) __riscv_th_vasub_vx_i8m4(op1, op2, rm, vl)
#define __riscv_vasub_vv_i8m8(op1, op2, rm, vl) __riscv_th_vasub_vv_i8m8(op1, op2, rm, vl)
#define __riscv_vasub_vx_i8m8(op1, op2, rm, vl) __riscv_th_vasub_vx_i8m8(op1, op2, rm, vl)
#define __riscv_vasub_vv_i16m1(op1, op2, rm, vl) __riscv_th_vasub_vv_i16m1(op1, op2, rm, vl)
#define __riscv_vasub_vx_i16m1(op1, op2, rm, vl) __riscv_th_vasub_vx_i16m1(op1, op2, rm, vl)
#define __riscv_vasub_vv_i16m2(op1, op2, rm, vl) __riscv_th_vasub_vv_i16m2(op1, op2, rm, vl)
#define __riscv_vasub_vx_i16m2(op1, op2, rm, vl) __riscv_th_vasub_vx_i16m2(op1, op2, rm, vl)
#define __riscv_vasub_vv_i16m4(op1, op2, rm, vl) __riscv_th_vasub_vv_i16m4(op1, op2, rm, vl)
#define __riscv_vasub_vx_i16m4(op1, op2, rm, vl) __riscv_th_vasub_vx_i16m4(op1, op2, rm, vl)
#define __riscv_vasub_vv_i16m8(op1, op2, rm, vl) __riscv_th_vasub_vv_i16m8(op1, op2, rm, vl)
#define __riscv_vasub_vx_i16m8(op1, op2, rm, vl) __riscv_th_vasub_vx_i16m8(op1, op2, rm, vl)
#define __riscv_vasub_vv_i32m1(op1, op2, rm, vl) __riscv_th_vasub_vv_i32m1(op1, op2, rm, vl)
#define __riscv_vasub_vx_i32m1(op1, op2, rm, vl) __riscv_th_vasub_vx_i32m1(op1, op2, rm, vl)
#define __riscv_vasub_vv_i32m2(op1, op2, rm, vl) __riscv_th_vasub_vv_i32m2(op1, op2, rm, vl)
#define __riscv_vasub_vx_i32m2(op1, op2, rm, vl) __riscv_th_vasub_vx_i32m2(op1, op2, rm, vl)
#define __riscv_vasub_vv_i32m4(op1, op2, rm, vl) __riscv_th_vasub_vv_i32m4(op1, op2, rm, vl)
#define __riscv_vasub_vx_i32m4(op1, op2, rm, vl) __riscv_th_vasub_vx_i32m4(op1, op2, rm, vl)
#define __riscv_vasub_vv_i32m8(op1, op2, rm, vl) __riscv_th_vasub_vv_i32m8(op1, op2, rm, vl)
#define __riscv_vasub_vx_i32m8(op1, op2, rm, vl) __riscv_th_vasub_vx_i32m8(op1, op2, rm, vl)
#define __riscv_vasub_vv_i64m1(op1, op2, rm, vl) __riscv_th_vasub_vv_i64m1(op1, op2, rm, vl)
#define __riscv_vasub_vx_i64m1(op1, op2, rm, vl) __riscv_th_vasub_vx_i64m1(op1, op2, rm, vl)
#define __riscv_vasub_vv_i64m2(op1, op2, rm, vl) __riscv_th_vasub_vv_i64m2(op1, op2, rm, vl)
#define __riscv_vasub_vx_i64m2(op1, op2, rm, vl) __riscv_th_vasub_vx_i64m2(op1, op2, rm, vl)
#define __riscv_vasub_vv_i64m4(op1, op2, rm, vl) __riscv_th_vasub_vv_i64m4(op1, op2, rm, vl)
#define __riscv_vasub_vx_i64m4(op1, op2, rm, vl) __riscv_th_vasub_vx_i64m4(op1, op2, rm, vl)
#define __riscv_vasub_vv_i64m8(op1, op2, rm, vl) __riscv_th_vasub_vv_i64m8(op1, op2, rm, vl)
#define __riscv_vasub_vx_i64m8(op1, op2, rm, vl) __riscv_th_vasub_vx_i64m8(op1, op2, rm, vl)

#define __riscv_vasub_vv_i8m1_m(mask, op1, op2, rm, vl) __riscv_th_vasub_vv_i8m1_m(mask, op1, op2, rm, vl)
#define __riscv_vasub_vx_i8m1_m(mask, op1, op2, rm, vl) __riscv_th_vasub_vx_i8m1_m(mask, op1, op2, rm, vl)
#define __riscv_vasub_vv_i8m2_m(mask, op1, op2, rm, vl) __riscv_th_vasub_vv_i8m2_m(mask, op1, op2, rm, vl)
#define __riscv_vasub_vx_i8m2_m(mask, op1, op2, rm, vl) __riscv_th_vasub_vx_i8m2_m(mask, op1, op2, rm, vl)
#define __riscv_vasub_vv_i8m4_m(mask, op1, op2, rm, vl) __riscv_th_vasub_vv_i8m4_m(mask, op1, op2, rm, vl)
#define __riscv_vasub_vx_i8m4_m(mask, op1, op2, rm, vl) __riscv_th_vasub_vx_i8m4_m(mask, op1, op2, rm, vl)
#define __riscv_vasub_vv_i8m8_m(mask, op1, op2, rm, vl) __riscv_th_vasub_vv_i8m8_m(mask, op1, op2, rm, vl)
#define __riscv_vasub_vx_i8m8_m(mask, op1, op2, rm, vl) __riscv_th_vasub_vx_i8m8_m(mask, op1, op2, rm, vl)
#define __riscv_vasub_vv_i16m1_m(mask, op1, op2, rm, vl) __riscv_th_vasub_vv_i16m1_m(mask, op1, op2, rm, vl)
#define __riscv_vasub_vx_i16m1_m(mask, op1, op2, rm, vl) __riscv_th_vasub_vx_i16m1_m(mask, op1, op2, rm, vl)
#define __riscv_vasub_vv_i16m2_m(mask, op1, op2, rm, vl) __riscv_th_vasub_vv_i16m2_m(mask, op1, op2, rm, vl)
#define __riscv_vasub_vx_i16m2_m(mask, op1, op2, rm, vl) __riscv_th_vasub_vx_i16m2_m(mask, op1, op2, rm, vl)
#define __riscv_vasub_vv_i16m4_m(mask, op1, op2, rm, vl) __riscv_th_vasub_vv_i16m4_m(mask, op1, op2, rm, vl)
#define __riscv_vasub_vx_i16m4_m(mask, op1, op2, rm, vl) __riscv_th_vasub_vx_i16m4_m(mask, op1, op2, rm, vl)
#define __riscv_vasub_vv_i16m8_m(mask, op1, op2, rm, vl) __riscv_th_vasub_vv_i16m8_m(mask, op1, op2, rm, vl)
#define __riscv_vasub_vx_i16m8_m(mask, op1, op2, rm, vl) __riscv_th_vasub_vx_i16m8_m(mask, op1, op2, rm, vl)
#define __riscv_vasub_vv_i32m1_m(mask, op1, op2, rm, vl) __riscv_th_vasub_vv_i32m1_m(mask, op1, op2, rm, vl)
#define __riscv_vasub_vx_i32m1_m(mask, op1, op2, rm, vl) __riscv_th_vasub_vx_i32m1_m(mask, op1, op2, rm, vl)
#define __riscv_vasub_vv_i32m2_m(mask, op1, op2, rm, vl) __riscv_th_vasub_vv_i32m2_m(mask, op1, op2, rm, vl)
#define __riscv_vasub_vx_i32m2_m(mask, op1, op2, rm, vl) __riscv_th_vasub_vx_i32m2_m(mask, op1, op2, rm, vl)
#define __riscv_vasub_vv_i32m4_m(mask, op1, op2, rm, vl) __riscv_th_vasub_vv_i32m4_m(mask, op1, op2, rm, vl)
#define __riscv_vasub_vx_i32m4_m(mask, op1, op2, rm, vl) __riscv_th_vasub_vx_i32m4_m(mask, op1, op2, rm, vl)
#define __riscv_vasub_vv_i32m8_m(mask, op1, op2, rm, vl) __riscv_th_vasub_vv_i32m8_m(mask, op1, op2, rm, vl)
#define __riscv_vasub_vx_i32m8_m(mask, op1, op2, rm, vl) __riscv_th_vasub_vx_i32m8_m(mask, op1, op2, rm, vl)
#define __riscv_vasub_vv_i64m1_m(mask, op1, op2, rm, vl) __riscv_th_vasub_vv_i64m1_m(mask, op1, op2, rm, vl)
#define __riscv_vasub_vx_i64m1_m(mask, op1, op2, rm, vl) __riscv_th_vasub_vx_i64m1_m(mask, op1, op2, rm, vl)
#define __riscv_vasub_vv_i64m2_m(mask, op1, op2, rm, vl) __riscv_th_vasub_vv_i64m2_m(mask, op1, op2, rm, vl)
#define __riscv_vasub_vx_i64m2_m(mask, op1, op2, rm, vl) __riscv_th_vasub_vx_i64m2_m(mask, op1, op2, rm, vl)
#define __riscv_vasub_vv_i64m4_m(mask, op1, op2, rm, vl) __riscv_th_vasub_vv_i64m4_m(mask, op1, op2, rm, vl)
#define __riscv_vasub_vx_i64m4_m(mask, op1, op2, rm, vl) __riscv_th_vasub_vx_i64m4_m(mask, op1, op2, rm, vl)
#define __riscv_vasub_vv_i64m8_m(mask, op1, op2, rm, vl) __riscv_th_vasub_vv_i64m8_m(mask, op1, op2, rm, vl)
#define __riscv_vasub_vx_i64m8_m(mask, op1, op2, rm, vl) __riscv_th_vasub_vx_i64m8_m(mask, op1, op2, rm, vl)

}] in
def th_single_width_averaging_add_and_subtract_wrapper_macros: RVVHeader;
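
These macros simply forward the unprefixed `__riscv_vaadd_*`/`__riscv_vasub_*` spellings to the `th_`-prefixed builtins, so code written against the standard intrinsic names compiles unchanged against the XTHeadVector implementation. A minimal sketch, under the same header/type assumptions as the earlier examples:

#include <riscv_vector.h>   // assumed

vint8m1_t halved_sum(vint8m1_t a, vint8m1_t b, size_t vl) {
  // Expands to __riscv_th_vaadd_vv_i8m1(a, b, __RISCV_VXRM_RNU, vl).
  return __riscv_vaadd_vv_i8m1(a, b, __RISCV_VXRM_RNU, vl);
}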