Merge pull request from GHSA-44mr-8vmm-wjhg
This ensures that memories, even with zero contents, still have the
necessary virtual mappings as required by the code generator to report
out-of-bounds reads/writes.
alexcrichton authored Nov 10, 2022
1 parent 2614f2e commit e60c374
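
For context, the per-slot virtual reservation made by the pooling allocator is the configured static memory bound (in Wasm pages) times WASM_PAGE_SIZE, plus the offset-guard region; the change below makes that reservation unconditional rather than skipping it when memory_pages is 0. A minimal sketch of the arithmetic, using illustrative values rather than Wasmtime's actual defaults:

fn main() {
    // Illustrative values only; the real numbers come from wasmtime_environ::Tunables
    // and the engine's configuration, and may differ.
    const WASM_PAGE_SIZE: u64 = 0x1_0000; // 64 KiB per Wasm page
    let static_memory_bound_pages: u64 = 0x1_0000; // e.g. a 4 GiB static bound
    let offset_guard_size: u64 = 0x8000_0000; // e.g. a 2 GiB guard region

    // Bytes of addressable space reserved for each memory slot in the pool.
    let static_memory_bound = static_memory_bound_pages * WASM_PAGE_SIZE;

    // Total virtual reservation per slot: the addressable bytes plus the guard
    // region that lets generated code trap on out-of-bounds accesses instead of
    // performing explicit bounds checks.
    let memory_size = static_memory_bound + offset_guard_size;
    assert_eq!(memory_size, 6 << 30); // 6 GiB with these example values

    // After this commit the reservation is made even when `memory_pages` is 0,
    // so any access to a zero-sized memory lands in guard pages and traps.
}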
Showing 2 changed files with 64 additions and 11 deletions.
34 changes: 23 additions & 11 deletions crates/runtime/src/instance/allocator/pooling.rs
@@ -19,8 +19,8 @@ use std::convert::TryFrom;
use std::mem;
use std::sync::Mutex;
use wasmtime_environ::{
DefinedMemoryIndex, DefinedTableIndex, HostPtr, Module, PrimaryMap, Tunables, VMOffsets,
WASM_PAGE_SIZE,
DefinedMemoryIndex, DefinedTableIndex, HostPtr, MemoryStyle, Module, PrimaryMap, Tunables,
VMOffsets, WASM_PAGE_SIZE,
};

mod index_allocator;
@@ -386,6 +386,20 @@ impl InstancePool
.defined_memory_index(memory_index)
.expect("should be a defined memory since we skipped imported ones");

match plan.style {
MemoryStyle::Static { bound } => {
let bound = bound * u64::from(WASM_PAGE_SIZE);
if bound < self.memories.static_memory_bound {
return Err(InstantiationError::Resource(anyhow!(
"static bound of {bound:x} bytes incompatible with \
reservation of {:x} bytes",
self.memories.static_memory_bound,
)));
}
}
MemoryStyle::Dynamic { .. } => {}
}

let memory = unsafe {
std::slice::from_raw_parts_mut(
self.memories.get_base(instance_index, defined_index),
@@ -658,6 +672,7 @@ struct MemoryPool {
initial_memory_offset: usize,
max_memories: usize,
max_instances: usize,
static_memory_bound: u64,
}

impl MemoryPool {
@@ -679,15 +694,11 @@ impl MemoryPool {
);
}

let memory_size = if instance_limits.memory_pages > 0 {
usize::try_from(
u64::from(tunables.static_memory_bound) * u64::from(WASM_PAGE_SIZE)
+ tunables.static_memory_offset_guard_size,
)
.map_err(|_| anyhow!("memory reservation size exceeds addressable memory"))?
} else {
0
};
let static_memory_bound =
u64::from(tunables.static_memory_bound) * u64::from(WASM_PAGE_SIZE);
let memory_size =
usize::try_from(static_memory_bound + tunables.static_memory_offset_guard_size)
.map_err(|_| anyhow!("memory reservation size exceeds addressable memory"))?;

assert!(
memory_size % crate::page_size() == 0,
@@ -745,6 +756,7 @@ impl MemoryPool {
max_memories,
max_instances,
max_memory_size: (instance_limits.memory_pages as usize) * (WASM_PAGE_SIZE as usize),
static_memory_bound,
};

Ok(pool)
41 changes: 41 additions & 0 deletions tests/all/pooling_allocator.rs
@@ -721,3 +721,44 @@ configured maximum of 16 bytes; breakdown of allocation requirement:

Ok(())
}

#[test]
fn zero_memory_pages_disallows_oob() -> Result<()> {
let mut config = Config::new();
config.allocation_strategy(InstanceAllocationStrategy::Pooling {
strategy: PoolingAllocationStrategy::NextAvailable,
instance_limits: InstanceLimits {
count: 1,
memory_pages: 0,
..Default::default()
},
});

let engine = Engine::new(&config)?;
let module = Module::new(
&engine,
r#"
(module
(memory 0)
(func (export "load") (param i32) (result i32)
local.get 0
i32.load)
(func (export "store") (param i32 )
local.get 0
local.get 0
i32.store)
)
"#,
)?;
let mut store = Store::new(&engine, ());
let instance = Instance::new(&mut store, &module, &[])?;
let load32 = instance.get_typed_func::<i32, i32, _>(&mut store, "load")?;
let store32 = instance.get_typed_func::<i32, (), _>(&mut store, "store")?;
for i in 0..31 {
assert!(load32.call(&mut store, 1 << i).is_err());
assert!(store32.call(&mut store, 1 << i).is_err());
}
Ok(())
}
