From 303306cf5ede678719ec1324bb02d3d02c014183 Mon Sep 17 00:00:00 2001
From: Amanieu d'Antras
Date: Sat, 14 Jul 2018 23:28:39 +0100
Subject: [PATCH] Add unaligned volatile intrinsics

---
 src/libcore/intrinsics.rs                          |  9 +++++++++
 src/librustc_codegen_llvm/builder.rs               |  8 +++++++-
 src/librustc_codegen_llvm/intrinsic.rs             | 14 ++++++++++++--
 src/librustc_codegen_llvm/mir/operand.rs           |  4 ++++
 src/librustc_typeck/check/intrinsic.rs             |  4 ++--
 .../run-make-fulldeps/volatile-intrinsics/main.rs  | 13 ++++++++++---
 6 files changed, 44 insertions(+), 8 deletions(-)

diff --git a/src/libcore/intrinsics.rs b/src/libcore/intrinsics.rs
index 89fe2d941a350..854cb5f4e3b3f 100644
--- a/src/libcore/intrinsics.rs
+++ b/src/libcore/intrinsics.rs
@@ -1085,6 +1085,15 @@ extern "rust-intrinsic" {
     /// [`std::ptr::write_volatile`](../../std/ptr/fn.write_volatile.html).
     pub fn volatile_store(dst: *mut T, val: T);
 
+    /// Perform a volatile load from the `src` pointer
+    /// The pointer is not required to be aligned.
+    #[cfg(not(stage0))]
+    pub fn unaligned_volatile_load(src: *const T) -> T;
+    /// Perform a volatile store to the `dst` pointer.
+    /// The pointer is not required to be aligned.
+    #[cfg(not(stage0))]
+    pub fn unaligned_volatile_store(dst: *mut T, val: T);
+
     /// Returns the square root of an `f32`
     pub fn sqrtf32(x: f32) -> f32;
     /// Returns the square root of an `f64`
diff --git a/src/librustc_codegen_llvm/builder.rs b/src/librustc_codegen_llvm/builder.rs
index 7b4998e85881e..c8dc579cd62e9 100644
--- a/src/librustc_codegen_llvm/builder.rs
+++ b/src/librustc_codegen_llvm/builder.rs
@@ -54,6 +54,7 @@ bitflags! {
     pub struct MemFlags: u8 {
         const VOLATILE = 1 << 0;
         const NONTEMPORAL = 1 << 1;
+        const UNALIGNED = 1 << 2;
     }
 }
 
@@ -602,7 +603,12 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
         let ptr = self.check_store(val, ptr);
         unsafe {
             let store = llvm::LLVMBuildStore(self.llbuilder, val, ptr);
-            llvm::LLVMSetAlignment(store, align.abi() as c_uint);
+            let align = if flags.contains(MemFlags::UNALIGNED) {
+                1
+            } else {
+                align.abi() as c_uint
+            };
+            llvm::LLVMSetAlignment(store, align);
             if flags.contains(MemFlags::VOLATILE) {
                 llvm::LLVMSetVolatile(store, llvm::True);
             }
diff --git a/src/librustc_codegen_llvm/intrinsic.rs b/src/librustc_codegen_llvm/intrinsic.rs
index 6bb5456f9034f..567595b699798 100644
--- a/src/librustc_codegen_llvm/intrinsic.rs
+++ b/src/librustc_codegen_llvm/intrinsic.rs
@@ -234,15 +234,20 @@ pub fn codegen_intrinsic_call<'a, 'tcx>(bx: &Builder<'a, 'tcx>,
             memset_intrinsic(bx, true, substs.type_at(0),
                              args[0].immediate(), args[1].immediate(), args[2].immediate())
         }
-        "volatile_load" => {
+        "volatile_load" | "unaligned_volatile_load" => {
             let tp_ty = substs.type_at(0);
             let mut ptr = args[0].immediate();
             if let PassMode::Cast(ty) = fn_ty.ret.mode {
                 ptr = bx.pointercast(ptr, ty.llvm_type(cx).ptr_to());
             }
             let load = bx.volatile_load(ptr);
+            let align = if name == "unaligned_volatile_load" {
+                1
+            } else {
+                cx.align_of(tp_ty).abi() as u32
+            };
             unsafe {
-                llvm::LLVMSetAlignment(load, cx.align_of(tp_ty).abi() as u32);
+                llvm::LLVMSetAlignment(load, align);
             }
             to_immediate(bx, load, cx.layout_of(tp_ty))
         },
@@ -251,6 +256,11 @@ pub fn codegen_intrinsic_call<'a, 'tcx>(bx: &Builder<'a, 'tcx>,
             args[1].val.volatile_store(bx, dst);
             return;
         },
+        "unaligned_volatile_store" => {
+            let dst = args[0].deref(bx.cx);
+            args[1].val.unaligned_volatile_store(bx, dst);
+            return;
+        },
         "prefetch_read_data" | "prefetch_write_data" |
         "prefetch_read_instruction" | "prefetch_write_instruction" => {
             let expect = cx.get_intrinsic(&("llvm.prefetch"));
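Aside, not part of the patch: the codegen changes above boil down to one thing. The new intrinsics emit exactly the same volatile load/store as volatile_load/volatile_store, but with the LLVM alignment forced to 1 instead of the type's ABI alignment. A minimal sketch of how that should be observable, assuming a nightly compiler with core_intrinsics; the function names are made up, and the IR mentioned in the comments is the expected shape rather than captured compiler output.

#![feature(core_intrinsics)]

use std::intrinsics::{unaligned_volatile_load, unaligned_volatile_store};

// Compiling with --emit=llvm-ir, the bodies below are expected to lower to
// volatile accesses with `align 1` (e.g. `load volatile i32, ... align 1`),
// whereas the plain volatile intrinsics keep u32's ABI alignment (`align 4`).
pub unsafe fn read_reg(p: *const u32) -> u32 {
    unaligned_volatile_load(p)
}

pub unsafe fn write_reg(p: *mut u32, v: u32) {
    unaligned_volatile_store(p, v)
}

Only the alignment differs: MemFlags::UNALIGNED and the `align` override in codegen leave every other property of the volatile access unchanged.
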
diff --git a/src/librustc_codegen_llvm/mir/operand.rs b/src/librustc_codegen_llvm/mir/operand.rs
index 3d3a4400bd810..49cc07d6854a7 100644
--- a/src/librustc_codegen_llvm/mir/operand.rs
+++ b/src/librustc_codegen_llvm/mir/operand.rs
@@ -286,6 +286,10 @@ impl<'a, 'tcx> OperandValue {
         self.store_with_flags(bx, dest, MemFlags::VOLATILE);
     }
 
+    pub fn unaligned_volatile_store(self, bx: &Builder<'a, 'tcx>, dest: PlaceRef<'tcx>) {
+        self.store_with_flags(bx, dest, MemFlags::VOLATILE | MemFlags::UNALIGNED);
+    }
+
     pub fn nontemporal_store(self, bx: &Builder<'a, 'tcx>, dest: PlaceRef<'tcx>) {
         self.store_with_flags(bx, dest, MemFlags::NONTEMPORAL);
     }
diff --git a/src/librustc_typeck/check/intrinsic.rs b/src/librustc_typeck/check/intrinsic.rs
index c93023edcea08..46cf9d1fa7fad 100644
--- a/src/librustc_typeck/check/intrinsic.rs
+++ b/src/librustc_typeck/check/intrinsic.rs
@@ -270,9 +270,9 @@ pub fn check_intrinsic_type<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
             "roundf32" => (0, vec![ tcx.types.f32 ], tcx.types.f32),
             "roundf64" => (0, vec![ tcx.types.f64 ], tcx.types.f64),
 
-            "volatile_load" =>
+            "volatile_load" | "unaligned_volatile_load" =>
                 (1, vec![ tcx.mk_imm_ptr(param(0)) ], param(0)),
-            "volatile_store" =>
+            "volatile_store" | "unaligned_volatile_store" =>
                 (1, vec![ tcx.mk_mut_ptr(param(0)), param(0) ], tcx.mk_nil()),
 
             "ctpop" | "ctlz" | "ctlz_nonzero" | "cttz" | "cttz_nonzero" |
diff --git a/src/test/run-make-fulldeps/volatile-intrinsics/main.rs b/src/test/run-make-fulldeps/volatile-intrinsics/main.rs
index 4d0d7672101c3..d214a20139c9c 100644
--- a/src/test/run-make-fulldeps/volatile-intrinsics/main.rs
+++ b/src/test/run-make-fulldeps/volatile-intrinsics/main.rs
@@ -10,17 +10,24 @@
 
 #![feature(core_intrinsics, volatile)]
 
-use std::intrinsics::{volatile_load, volatile_store};
+use std::intrinsics::{
+    unaligned_volatile_load, unaligned_volatile_store, volatile_load, volatile_store,
+};
 use std::ptr::{read_volatile, write_volatile};
 
 pub fn main() {
     unsafe {
-        let mut i : isize = 1;
+        let mut i: isize = 1;
         volatile_store(&mut i, 2);
         assert_eq!(volatile_load(&i), 2);
     }
     unsafe {
-        let mut i : isize = 1;
+        let mut i: isize = 1;
+        unaligned_volatile_store(&mut i, 2);
+        assert_eq!(unaligned_volatile_load(&i), 2);
+    }
+    unsafe {
+        let mut i: isize = 1;
         write_volatile(&mut i, 2);
         assert_eq!(read_volatile(&i), 2);
     }
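Aside, not part of the patch: the caller these intrinsics are meant for is volatile access through a pointer that may not be ABI-aligned, for example a field of a #[repr(packed)] struct. A minimal sketch, assuming a current nightly compiler with the core_intrinsics feature; the PackedRegs type and its fields are hypothetical, and addr_of!/addr_of_mut! post-date this 2018 patch.

#![feature(core_intrinsics)]

use std::intrinsics::{unaligned_volatile_load, unaligned_volatile_store};
use std::ptr::{addr_of, addr_of_mut};

// `value` sits at offset 1, so a pointer to it is only 1-byte aligned.
#[repr(C, packed)]
struct PackedRegs {
    _tag: u8,
    value: u32,
}

fn main() {
    let mut regs = PackedRegs { _tag: 0, value: 1 };
    unsafe {
        // addr_of_mut! yields a raw pointer without creating a reference to
        // the unaligned field; the unaligned_* intrinsics then perform the
        // volatile access with alignment 1 instead of u32's ABI alignment.
        unaligned_volatile_store(addr_of_mut!(regs.value), 2);
        assert_eq!(unaligned_volatile_load(addr_of!(regs.value)), 2);
    }
}

Forcing alignment 1 is always sound on the LLVM side; it only stops the backend from assuming the pointer is aligned, which may make the access slower on some targets.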