From 5697586ceee3e7be7c25487a534416839c878aa3 Mon Sep 17 00:00:00 2001
From: Alex Crichton
Date: Wed, 8 Mar 2023 13:00:40 -0600
Subject: [PATCH] Merge pull request from GHSA-ff4p-7xrq-q5r8

* x64: Remove incorrect `amode_add` lowering rules

This commit removes two incorrect rules from the x64 backend's
computation of addressing modes. These rules folded a zero-extended
32-bit computation into the address-mode operand, which is not correct:
the 32-bit computation should be truncated to 32 bits, but once folded
into the address-mode computation it is performed with 64-bit operands,
so the truncation never happens.

* Add release notes for 6.0.1
---
 RELEASES.md                                        | 14 ++++++++++++++
 cranelift/codegen/src/isa/x64/inst.isle            | 14 --------------
 .../filetests/filetests/isa/x64/amode-opt.clif     |  9 +++++----
 3 files changed, 19 insertions(+), 18 deletions(-)

diff --git a/RELEASES.md b/RELEASES.md
index 672356ad7e84..8a620134b3f8 100644
--- a/RELEASES.md
+++ b/RELEASES.md
@@ -1,5 +1,19 @@
 --------------------------------------------------------------------------------
 
+## 6.0.1
+
+Released 2023-03-08.
+
+### Fixed
+
+* Guest-controlled out-of-bounds read/write on x86\_64
+  [GHSA-ff4p-7xrq-q5r8](https://github.com/bytecodealliance/wasmtime/security/advisories/GHSA-ff4p-7xrq-q5r8)
+
+* Miscompilation of `i8x16.select` with the same inputs on x86\_64
+  [GHSA-xm67-587q-r2vw](https://github.com/bytecodealliance/wasmtime/security/advisories/GHSA-xm67-587q-r2vw)
+
+--------------------------------------------------------------------------------
+
 ## 6.0.0
 
 Released 2023-02-20.
diff --git a/cranelift/codegen/src/isa/x64/inst.isle b/cranelift/codegen/src/isa/x64/inst.isle
index 213307cc43dd..536c455438ef 100644
--- a/cranelift/codegen/src/isa/x64/inst.isle
+++ b/cranelift/codegen/src/isa/x64/inst.isle
@@ -987,20 +987,6 @@
 (rule 2 (amode_add (Amode.ImmReg off (valid_reg base) flags) (ishl index (iconst (uimm8 shift))))
       (if (u32_lteq (u8_as_u32 shift) 3))
       (Amode.ImmRegRegShift off base index shift flags))
-(rule 2 (amode_add (Amode.ImmReg off (valid_reg base) flags) (uextend (ishl index (iconst (uimm8 shift)))))
-      (if (u32_lteq (u8_as_u32 shift) 3))
-      (Amode.ImmRegRegShift off base (extend_to_gpr index $I64 (ExtendKind.Zero)) shift flags))
-
-;; Same, but with a uextend of a shift of a 32-bit add. This is valid
-;; because we know our lowering of a narrower-than-64-bit `iadd` will
-;; always write the full register width, so we can effectively ignore
-;; the `uextend` and look through it to the `ishl`.
-;;
-;; Priority 3 to avoid conflict with the previous rule.
-(rule 3 (amode_add (Amode.ImmReg off (valid_reg base) flags)
-                   (uextend (ishl index @ (iadd _ _) (iconst (uimm8 shift)))))
-      (if (u32_lteq (u8_as_u32 shift) 3))
-      (Amode.ImmRegRegShift off base index shift flags))
 
 ;; -- Case 4 (absorbing constant offsets).
 ;;
diff --git a/cranelift/filetests/filetests/isa/x64/amode-opt.clif b/cranelift/filetests/filetests/isa/x64/amode-opt.clif
index be955ae566fd..07a49ca2a419 100644
--- a/cranelift/filetests/filetests/isa/x64/amode-opt.clif
+++ b/cranelift/filetests/filetests/isa/x64/amode-opt.clif
@@ -132,8 +132,9 @@ block0(v0: i64, v1: i32):
 ; pushq %rbp
 ; movq %rsp, %rbp
 ; block0:
-; movl %esi, %ecx
-; movq -1(%rdi,%rcx,8), %rax
+; movq %rsi, %rdx
+; shll $3, %edx, %edx
+; movq -1(%rdi,%rdx,1), %rax
 ; movq %rbp, %rsp
 ; popq %rbp
 ; ret
@@ -155,8 +156,8 @@ block0(v0: i64, v1: i32, v2: i32):
 ; block0:
 ; movq %rsi, %r8
 ; addl %r8d, %edx, %r8d
-; movq -1(%rdi,%r8,4), %rax
+; shll $2, %r8d, %r8d
+; movq -1(%rdi,%r8,1), %rax
 ; movq %rbp, %rsp
 ; popq %rbp
 ; ret
-
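
As a concrete illustration of the truncation bug described in the commit
message above: once folded into the addressing mode, the 32-bit `ishl`
was performed with 64-bit operands, so bits that should have been
discarded past bit 31 survived into the effective address. The sketch
below is not part of the upstream patch; the index value is
hypothetical, chosen only so the shift overflows 32 bits.

// Rust sketch of the two semantics (hypothetical guest-controlled index).
fn main() {
    let index: u32 = 0x2000_0000;

    // Correct semantics: `ishl.i32` discards bits shifted past bit 31,
    // then `uextend` zero-extends the truncated result to 64 bits.
    let correct = (index << 3) as u64; // == 0x0

    // Semantics of the removed rules: zero-extend first, then shift as
    // part of a 64-bit addressing-mode computation (base + (index << 3)),
    // so the high bit survives into the address.
    let folded = (index as u64) << 3; // == 0x1_0000_0000

    // The difference is a guest-controlled out-of-bounds offset.
    assert_ne!(correct, folded);
    println!("correct: {correct:#x}, folded: {folded:#x}");
}

With the rules removed, the shift is instead emitted as an explicit
32-bit `shll` (visible in the updated filetest expectations above),
which wraps in the register before the zero-extended value is used with
a 1-scaled address, restoring the correct semantics.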