From 511c71fa3a8dd8286092453eb04de77b1e968da0 Mon Sep 17 00:00:00 2001 From: Erich Gubler Date: Tue, 20 Sep 2022 13:29:41 -0400 Subject: [PATCH 1/6] chore: `warn(unsafe_op_in_unsafe_fn)` for {deno_webgpu,player,wgpu-types}` This commit bundles all of the trivial cases for enabling this lint, where no action is required beyond adding the lint. --- deno_webgpu/src/lib.rs | 2 ++ player/src/lib.rs | 2 ++ wgpu-types/src/lib.rs | 2 +- 3 files changed, 5 insertions(+), 1 deletion(-) diff --git a/deno_webgpu/src/lib.rs b/deno_webgpu/src/lib.rs index 6f79aaa613..287e340920 100644 --- a/deno_webgpu/src/lib.rs +++ b/deno_webgpu/src/lib.rs @@ -1,5 +1,7 @@ // Copyright 2018-2022 the Deno authors. All rights reserved. MIT license. +#![warn(unsafe_op_in_unsafe_fn)] + use deno_core::error::AnyError; use deno_core::include_js_files; use deno_core::op; diff --git a/player/src/lib.rs b/player/src/lib.rs index dffb7c069d..0ef6080b77 100644 --- a/player/src/lib.rs +++ b/player/src/lib.rs @@ -6,6 +6,8 @@ * so that we don't accidentally try to use the same ID. !*/ +#![warn(unsafe_op_in_unsafe_fn)] + use wgc::device::trace; use std::{borrow::Cow, fmt::Debug, fs, marker::PhantomData, path::Path}; diff --git a/wgpu-types/src/lib.rs b/wgpu-types/src/lib.rs index 707b1fd26f..59710afeca 100644 --- a/wgpu-types/src/lib.rs +++ b/wgpu-types/src/lib.rs @@ -7,7 +7,7 @@ // We don't use syntax sugar where it's not necessary. clippy::match_like_matches_macro, )] -#![warn(missing_docs)] +#![warn(missing_docs, unsafe_op_in_unsafe_fn)] #[cfg(feature = "serde")] use serde::{Deserialize, Serialize}; From 7c128a77c99b64dd38dd88319575bee02d17d348 Mon Sep 17 00:00:00 2001 From: Erich Gubler Date: Tue, 20 Sep 2022 13:31:03 -0400 Subject: [PATCH 2/6] chore: naively set `warn(unsafe_op_in_unsafe_fn)` for `wgpu` Do the simplest mechanical work necessary to enable and satisfy this lint; only put down `unsafe` blocks, don't try to inspect for correctness or anything else. 
--- wgpu/src/backend/direct.rs | 81 +++++++++++++++++++-------------- wgpu/src/lib.rs | 93 ++++++++++++++++++++++---------------- 2 files changed, 101 insertions(+), 73 deletions(-) diff --git a/wgpu/src/backend/direct.rs b/wgpu/src/backend/direct.rs index ddd595e788..e7d100d780 100644 --- a/wgpu/src/backend/direct.rs +++ b/wgpu/src/backend/direct.rs @@ -40,25 +40,26 @@ impl fmt::Debug for Context { impl Context { #[cfg(any(not(target_arch = "wasm32"), feature = "emscripten"))] pub unsafe fn from_hal_instance(hal_instance: A::Instance) -> Self { - Self(wgc::hub::Global::from_hal_instance::( - "wgpu", - wgc::hub::IdentityManagerFactory, - hal_instance, - )) + Self(unsafe { + wgc::hub::Global::from_hal_instance::( + "wgpu", + wgc::hub::IdentityManagerFactory, + hal_instance, + ) + }) } /// # Safety /// /// - The raw instance handle returned must not be manually destroyed. pub unsafe fn instance_as_hal(&self) -> Option<&A::Instance> { - self.0.instance_as_hal::() + unsafe { self.0.instance_as_hal::() } } pub unsafe fn from_core_instance(core_instance: wgc::instance::Instance) -> Self { - Self(wgc::hub::Global::from_instance( - wgc::hub::IdentityManagerFactory, - core_instance, - )) + Self(unsafe { + wgc::hub::Global::from_instance(wgc::hub::IdentityManagerFactory, core_instance) + }) } pub(crate) fn global(&self) -> &wgc::hub::Global { @@ -76,7 +77,7 @@ impl Context { &self, hal_adapter: hal::ExposedAdapter, ) -> wgc::id::AdapterId { - self.0.create_adapter_from_hal(hal_adapter, ()) + unsafe { self.0.create_adapter_from_hal(hal_adapter, ()) } } pub unsafe fn adapter_as_hal) -> R, R>( @@ -84,8 +85,10 @@ impl Context { adapter: wgc::id::AdapterId, hal_adapter_callback: F, ) -> R { - self.0 - .adapter_as_hal::(adapter, hal_adapter_callback) + unsafe { + self.0 + .adapter_as_hal::(adapter, hal_adapter_callback) + } } #[cfg(any(not(target_arch = "wasm32"), feature = "emscripten"))] @@ -97,13 +100,15 @@ impl Context { trace_dir: Option<&std::path::Path>, ) -> 
Result<(Device, Queue), crate::RequestDeviceError> { let global = &self.0; - let (device_id, error) = global.create_device_from_hal( - *adapter, - hal_device, - &desc.map_label(|l| l.map(Borrowed)), - trace_dir, - (), - ); + let (device_id, error) = unsafe { + global.create_device_from_hal( + *adapter, + hal_device, + &desc.map_label(|l| l.map(Borrowed)), + trace_dir, + (), + ) + }; if let Some(err) = error { self.handle_error_fatal(err, "Adapter::create_device_from_hal"); } @@ -128,12 +133,14 @@ impl Context { desc: &TextureDescriptor, ) -> Texture { let global = &self.0; - let (id, error) = global.create_texture_from_hal::( - hal_texture, - device.id, - &desc.map_label(|l| l.map(Borrowed)), - (), - ); + let (id, error) = unsafe { + global.create_texture_from_hal::( + hal_texture, + device.id, + &desc.map_label(|l| l.map(Borrowed)), + (), + ) + }; if let Some(cause) = error { self.handle_error( &device.error_sink, @@ -155,8 +162,10 @@ impl Context { device: &Device, hal_device_callback: F, ) -> R { - self.0 - .device_as_hal::(device.id, hal_device_callback) + unsafe { + self.0 + .device_as_hal::(device.id, hal_device_callback) + } } #[cfg(any(not(target_arch = "wasm32"), feature = "emscripten"))] @@ -169,8 +178,10 @@ impl Context { surface: &Surface, hal_surface_callback: F, ) -> R { - self.0 - .surface_as_hal_mut::(surface.id, hal_surface_callback) + unsafe { + self.0 + .surface_as_hal_mut::(surface.id, hal_surface_callback) + } } #[cfg(any(not(target_arch = "wasm32"), feature = "emscripten"))] @@ -179,8 +190,10 @@ impl Context { texture: &Texture, hal_texture_callback: F, ) { - self.0 - .texture_as_hal::(texture.id, hal_texture_callback) + unsafe { + self.0 + .texture_as_hal::(texture.id, hal_texture_callback) + } } #[cfg(any(not(target_arch = "wasm32"), feature = "emscripten"))] @@ -232,7 +245,7 @@ impl Context { self: &Arc, visual: *mut std::ffi::c_void, ) -> crate::Surface { - let id = self.0.instance_create_surface_from_visual(visual, ()); + let id = unsafe 
{ self.0.instance_create_surface_from_visual(visual, ()) }; crate::Surface { context: Arc::clone(self), id: Surface { @@ -1265,7 +1278,7 @@ impl crate::Context for Context { label: desc.label.map(Borrowed), // Doesn't matter the value since spirv shaders aren't mutated to include // runtime checks - shader_bound_checks: wgt::ShaderBoundChecks::unchecked(), + shader_bound_checks: unsafe { wgt::ShaderBoundChecks::unchecked() }, }; let (id, error) = wgc::gfx_select!( device.id => global.device_create_shader_module_spirv(device.id, &descriptor, Borrowed(&desc.source), ()) diff --git a/wgpu/src/lib.rs b/wgpu/src/lib.rs index 3a9f048df6..9e471b8d93 100644 --- a/wgpu/src/lib.rs +++ b/wgpu/src/lib.rs @@ -4,7 +4,7 @@ #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] #![doc(html_logo_url = "https://raw.githubusercontent.com/gfx-rs/wgpu/master/logo.png")] -#![warn(missing_docs)] +#![warn(missing_docs, unsafe_op_in_unsafe_fn)] mod backend; pub mod util; @@ -1738,7 +1738,7 @@ impl Instance { #[cfg(any(not(target_arch = "wasm32"), feature = "emscripten"))] pub unsafe fn from_hal(hal_instance: A::Instance) -> Self { Self { - context: Arc::new(C::from_hal_instance::(hal_instance)), + context: Arc::new(unsafe { C::from_hal_instance::(hal_instance) }), } } @@ -1754,7 +1754,7 @@ impl Instance { /// [`Instance`]: hal::Api::Instance #[cfg(any(not(target_arch = "wasm32"), feature = "webgl"))] pub unsafe fn as_hal(&self) -> Option<&A::Instance> { - self.context.instance_as_hal::() + unsafe { self.context.instance_as_hal::() } } /// Create an new instance of wgpu from a wgpu-core instance. 
@@ -1769,7 +1769,7 @@ impl Instance { #[cfg(any(not(target_arch = "wasm32"), feature = "webgl"))] pub unsafe fn from_core(core_instance: wgc::instance::Instance) -> Self { Self { - context: Arc::new(C::from_core_instance(core_instance)), + context: Arc::new(unsafe { C::from_core_instance(core_instance) }), } } @@ -1815,7 +1815,7 @@ impl Instance { hal_adapter: hal::ExposedAdapter, ) -> Adapter { let context = Arc::clone(&self.context); - let id = context.create_adapter_from_hal(hal_adapter); + let id = unsafe { context.create_adapter_from_hal(hal_adapter) }; Adapter { context, id } } @@ -1855,7 +1855,7 @@ impl Instance { &self, layer: *mut std::ffi::c_void, ) -> Surface { - self.context.create_surface_from_core_animation_layer(layer) + unsafe { self.context.create_surface_from_core_animation_layer(layer) } } /// Creates a surface from `IDCompositionVisual`. @@ -1865,7 +1865,7 @@ impl Instance { /// - visual must be a valid IDCompositionVisual to create a surface upon. #[cfg(target_os = "windows")] pub unsafe fn create_surface_from_visual(&self, visual: *mut std::ffi::c_void) -> Surface { - self.context.create_surface_from_visual(visual) + unsafe { self.context.create_surface_from_visual(visual) } } /// Creates a surface from a `web_sys::HtmlCanvasElement`. 
@@ -1978,20 +1978,22 @@ impl Adapter { trace_path: Option<&std::path::Path>, ) -> Result<(Device, Queue), RequestDeviceError> { let context = Arc::clone(&self.context); - self.context - .create_device_from_hal(&self.id, hal_device, desc, trace_path) - .map(|(device_id, queue_id)| { - ( - Device { - context: Arc::clone(&context), - id: device_id, - }, - Queue { - context, - id: queue_id, - }, - ) - }) + unsafe { + self.context + .create_device_from_hal(&self.id, hal_device, desc, trace_path) + } + .map(|(device_id, queue_id)| { + ( + Device { + context: Arc::clone(&context), + id: device_id, + }, + Queue { + context, + id: queue_id, + }, + ) + }) } /// Apply a callback to this `Adapter`'s underlying backend adapter. @@ -2018,8 +2020,10 @@ impl Adapter { &self, hal_adapter_callback: F, ) -> R { - self.context - .adapter_as_hal::(self.id, hal_adapter_callback) + unsafe { + self.context + .adapter_as_hal::(self.id, hal_adapter_callback) + } } /// Returns whether this adapter may present to the passed surface. 
@@ -2119,12 +2123,14 @@ impl Device { ) -> ShaderModule { ShaderModule { context: Arc::clone(&self.context), - id: Context::device_create_shader_module( - &*self.context, - &self.id, - desc, - wgt::ShaderBoundChecks::unchecked(), - ), + id: unsafe { + Context::device_create_shader_module( + &*self.context, + &self.id, + desc, + wgt::ShaderBoundChecks::unchecked(), + ) + }, } } @@ -2142,7 +2148,9 @@ impl Device { ) -> ShaderModule { ShaderModule { context: Arc::clone(&self.context), - id: Context::device_create_shader_module_spirv(&*self.context, &self.id, desc), + id: unsafe { + Context::device_create_shader_module_spirv(&*self.context, &self.id, desc) + }, } } @@ -2252,9 +2260,10 @@ impl Device { ) -> Texture { Texture { context: Arc::clone(&self.context), - id: self - .context - .create_texture_from_hal::(hal_texture, &self.id, desc), + id: unsafe { + self.context + .create_texture_from_hal::(hal_texture, &self.id, desc) + }, owned: true, } } @@ -2326,8 +2335,10 @@ impl Device { &self, hal_device_callback: F, ) -> R { - self.context - .device_as_hal::(&self.id, hal_device_callback) + unsafe { + self.context + .device_as_hal::(&self.id, hal_device_callback) + } } } @@ -2637,8 +2648,10 @@ impl Texture { &self, hal_texture_callback: F, ) { - self.context - .texture_as_hal::(&self.id, hal_texture_callback) + unsafe { + self.context + .texture_as_hal::(&self.id, hal_texture_callback) + } } /// Creates a view of this texture. 
@@ -3795,8 +3808,10 @@ impl Surface { &mut self, hal_surface_callback: F, ) -> R { - self.context - .surface_as_hal_mut::(&self.id, hal_surface_callback) + unsafe { + self.context + .surface_as_hal_mut::(&self.id, hal_surface_callback) + } } } From d799498c734dc28d1f1cc5bda16b5ce816cf4454 Mon Sep 17 00:00:00 2001 From: Erich Gubler Date: Tue, 20 Sep 2022 13:35:38 -0400 Subject: [PATCH 3/6] chore: naively set `warn(unsafe_op_in_unsafe_fn)` in `wgpu-core` Do the simplest mechanical work necessary to enable and satisfy this lint; only put down `unsafe` blocks, don't try to inspect for correctness or anything else. N.B.: that there _are_ some adjustments identified that could be made here, like breaking multiple individual `unsafe` operations into their `unsafe` spans. This is left for a follow-up commit. --- wgpu-core/src/command/bundle.rs | 82 ++++---- wgpu-core/src/command/compute.rs | 22 ++- wgpu-core/src/command/mod.rs | 3 +- wgpu-core/src/command/render.rs | 26 +-- wgpu-core/src/device/life.rs | 20 +- wgpu-core/src/device/mod.rs | 9 +- wgpu-core/src/device/queue.rs | 6 +- wgpu-core/src/instance.rs | 2 +- wgpu-core/src/lib.rs | 1 + wgpu-core/src/track/buffer.rs | 190 ++++++++++--------- wgpu-core/src/track/mod.rs | 51 ++--- wgpu-core/src/track/texture.rs | 309 +++++++++++++++++-------------- 12 files changed, 405 insertions(+), 316 deletions(-) diff --git a/wgpu-core/src/command/bundle.rs b/wgpu-core/src/command/bundle.rs index 8731edc0ca..37560ec885 100644 --- a/wgpu-core/src/command/bundle.rs +++ b/wgpu-core/src/command/bundle.rs @@ -763,7 +763,7 @@ impl RenderBundle { let mut offsets = self.base.dynamic_offsets.as_slice(); let mut pipeline_layout_id = None::>; if let Some(ref label) = self.base.label { - raw.begin_debug_marker(label); + unsafe { raw.begin_debug_marker(label) }; } for command in self.base.commands.iter() { @@ -774,17 +774,19 @@ impl RenderBundle { bind_group_id, } => { let bind_group = bind_group_guard.get(bind_group_id).unwrap(); - 
raw.set_bind_group( - &pipeline_layout_guard[pipeline_layout_id.unwrap()].raw, - index as u32, - &bind_group.raw, - &offsets[..num_dynamic_offsets as usize], - ); + unsafe { + raw.set_bind_group( + &pipeline_layout_guard[pipeline_layout_id.unwrap()].raw, + index as u32, + &bind_group.raw, + &offsets[..num_dynamic_offsets as usize], + ) + }; offsets = &offsets[num_dynamic_offsets as usize..]; } RenderCommand::SetPipeline(pipeline_id) => { let pipeline = pipeline_guard.get(pipeline_id).unwrap(); - raw.set_render_pipeline(&pipeline.raw); + unsafe { raw.set_render_pipeline(&pipeline.raw) }; pipeline_layout_id = Some(pipeline.layout_id.value); } @@ -805,7 +807,7 @@ impl RenderBundle { offset, size, }; - raw.set_index_buffer(bb, index_format); + unsafe { raw.set_index_buffer(bb, index_format) }; } RenderCommand::SetVertexBuffer { slot, @@ -824,7 +826,7 @@ impl RenderBundle { offset, size, }; - raw.set_vertex_buffer(slot, bb); + unsafe { raw.set_vertex_buffer(slot, bb) }; } RenderCommand::SetPushConstant { stages, @@ -841,18 +843,22 @@ impl RenderBundle { let data_slice = &self.base.push_constant_data [(values_offset as usize)..values_end_offset]; - raw.set_push_constants(&pipeline_layout.raw, stages, offset, data_slice) + unsafe { + raw.set_push_constants(&pipeline_layout.raw, stages, offset, data_slice) + } } else { super::push_constant_clear( offset, size_bytes, |clear_offset, clear_data| { - raw.set_push_constants( - &pipeline_layout.raw, - stages, - clear_offset, - clear_data, - ); + unsafe { + raw.set_push_constants( + &pipeline_layout.raw, + stages, + clear_offset, + clear_data, + ) + }; }, ); } @@ -863,7 +869,7 @@ impl RenderBundle { first_vertex, first_instance, } => { - raw.draw(first_vertex, vertex_count, first_instance, instance_count); + unsafe { raw.draw(first_vertex, vertex_count, first_instance, instance_count) }; } RenderCommand::DrawIndexed { index_count, @@ -872,13 +878,15 @@ impl RenderBundle { base_vertex, first_instance, } => { - raw.draw_indexed( - 
first_index, - index_count, - base_vertex, - first_instance, - instance_count, - ); + unsafe { + raw.draw_indexed( + first_index, + index_count, + base_vertex, + first_instance, + instance_count, + ) + }; } RenderCommand::MultiDrawIndirect { buffer_id, @@ -892,7 +900,7 @@ impl RenderBundle { .raw .as_ref() .ok_or(ExecutionError::DestroyedBuffer(buffer_id))?; - raw.draw_indirect(buffer, offset, 1); + unsafe { raw.draw_indirect(buffer, offset, 1) }; } RenderCommand::MultiDrawIndirect { buffer_id, @@ -906,7 +914,7 @@ impl RenderBundle { .raw .as_ref() .ok_or(ExecutionError::DestroyedBuffer(buffer_id))?; - raw.draw_indexed_indirect(buffer, offset, 1); + unsafe { raw.draw_indexed_indirect(buffer, offset, 1) }; } RenderCommand::MultiDrawIndirect { .. } | RenderCommand::MultiDrawIndirectCount { .. } => { @@ -931,7 +939,7 @@ impl RenderBundle { } if let Some(_) = self.base.label { - raw.end_debug_marker(); + unsafe { raw.end_debug_marker() }; } Ok(()) @@ -1439,13 +1447,15 @@ pub mod bundle_ffi { offsets: *const DynamicOffset, offset_length: usize, ) { - let redundant = bundle.current_bind_groups.set_and_check_redundant( - bind_group_id, - index, - &mut bundle.base.dynamic_offsets, - offsets, - offset_length, - ); + let redundant = unsafe { + bundle.current_bind_groups.set_and_check_redundant( + bind_group_id, + index, + &mut bundle.base.dynamic_offsets, + offsets, + offset_length, + ) + }; if redundant { return; @@ -1522,7 +1532,7 @@ pub mod bundle_ffi { 0, "Push constant size must be aligned to 4 bytes." ); - let data_slice = slice::from_raw_parts(data, size_bytes as usize); + let data_slice = unsafe { slice::from_raw_parts(data, size_bytes as usize) }; let value_offset = pass.base.push_constant_data.len().try_into().expect( "Ran out of push constant space. 
Don't set 4gb of push constants per RenderBundle.", ); diff --git a/wgpu-core/src/command/compute.rs b/wgpu-core/src/command/compute.rs index a4f3365627..c228519595 100644 --- a/wgpu-core/src/command/compute.rs +++ b/wgpu-core/src/command/compute.rs @@ -800,13 +800,15 @@ pub mod compute_ffi { offsets: *const DynamicOffset, offset_length: usize, ) { - let redundant = pass.current_bind_groups.set_and_check_redundant( - bind_group_id, - index, - &mut pass.base.dynamic_offsets, - offsets, - offset_length, - ); + let redundant = unsafe { + pass.current_bind_groups.set_and_check_redundant( + bind_group_id, + index, + &mut pass.base.dynamic_offsets, + offsets, + offset_length, + ) + }; if redundant { return; @@ -854,7 +856,7 @@ pub mod compute_ffi { 0, "Push constant size must be aligned to 4 bytes." ); - let data_slice = slice::from_raw_parts(data, size_bytes as usize); + let data_slice = unsafe { slice::from_raw_parts(data, size_bytes as usize) }; let value_offset = pass.base.push_constant_data.len().try_into().expect( "Ran out of push constant space. 
Don't set 4gb of push constants per ComputePass.", ); @@ -905,7 +907,7 @@ pub mod compute_ffi { label: RawString, color: u32, ) { - let bytes = ffi::CStr::from_ptr(label).to_bytes(); + let bytes = unsafe { ffi::CStr::from_ptr(label) }.to_bytes(); pass.base.string_data.extend_from_slice(bytes); pass.base.commands.push(ComputeCommand::PushDebugGroup { @@ -929,7 +931,7 @@ pub mod compute_ffi { label: RawString, color: u32, ) { - let bytes = ffi::CStr::from_ptr(label).to_bytes(); + let bytes = unsafe { ffi::CStr::from_ptr(label) }.to_bytes(); pass.base.string_data.extend_from_slice(bytes); pass.base.commands.push(ComputeCommand::InsertDebugMarker { diff --git a/wgpu-core/src/command/mod.rs b/wgpu-core/src/command/mod.rs index 06eee0bd51..f6dc086350 100644 --- a/wgpu-core/src/command/mod.rs +++ b/wgpu-core/src/command/mod.rs @@ -526,7 +526,8 @@ impl BindGroupStateChange { if let Some(current_bind_group) = self.last_states.get_mut(index as usize) { current_bind_group.reset(); } - dynamic_offsets.extend_from_slice(slice::from_raw_parts(offsets, offset_length)); + dynamic_offsets + .extend_from_slice(unsafe { slice::from_raw_parts(offsets, offset_length) }); } false } diff --git a/wgpu-core/src/command/render.rs b/wgpu-core/src/command/render.rs index 41287c9761..09af0bbe6a 100644 --- a/wgpu-core/src/command/render.rs +++ b/wgpu-core/src/command/render.rs @@ -2113,13 +2113,15 @@ pub mod render_ffi { offsets: *const DynamicOffset, offset_length: usize, ) { - let redundant = pass.current_bind_groups.set_and_check_redundant( - bind_group_id, - index, - &mut pass.base.dynamic_offsets, - offsets, - offset_length, - ); + let redundant = unsafe { + pass.current_bind_groups.set_and_check_redundant( + bind_group_id, + index, + &mut pass.base.dynamic_offsets, + offsets, + offset_length, + ) + }; if redundant { return; @@ -2239,7 +2241,7 @@ pub mod render_ffi { 0, "Push constant size must be aligned to 4 bytes." 
); - let data_slice = slice::from_raw_parts(data, size_bytes as usize); + let data_slice = unsafe { slice::from_raw_parts(data, size_bytes as usize) }; let value_offset = pass.base.push_constant_data.len().try_into().expect( "Ran out of push constant space. Don't set 4gb of push constants per RenderPass.", ); @@ -2402,7 +2404,7 @@ pub mod render_ffi { label: RawString, color: u32, ) { - let bytes = ffi::CStr::from_ptr(label).to_bytes(); + let bytes = unsafe { ffi::CStr::from_ptr(label) }.to_bytes(); pass.base.string_data.extend_from_slice(bytes); pass.base.commands.push(RenderCommand::PushDebugGroup { @@ -2426,7 +2428,7 @@ pub mod render_ffi { label: RawString, color: u32, ) { - let bytes = ffi::CStr::from_ptr(label).to_bytes(); + let bytes = unsafe { ffi::CStr::from_ptr(label) }.to_bytes(); pass.base.string_data.extend_from_slice(bytes); pass.base.commands.push(RenderCommand::InsertDebugMarker { @@ -2478,7 +2480,9 @@ pub mod render_ffi { render_bundle_ids: *const id::RenderBundleId, render_bundle_ids_length: usize, ) { - for &bundle_id in slice::from_raw_parts(render_bundle_ids, render_bundle_ids_length) { + for &bundle_id in + unsafe { slice::from_raw_parts(render_bundle_ids, render_bundle_ids_length) } + { pass.base .commands .push(RenderCommand::ExecuteBundle(bundle_id)); diff --git a/wgpu-core/src/device/life.rs b/wgpu-core/src/device/life.rs index bf8b3c86b9..605aea3dab 100644 --- a/wgpu-core/src/device/life.rs +++ b/wgpu-core/src/device/life.rs @@ -132,61 +132,61 @@ impl NonReferencedResources { if !self.buffers.is_empty() { profiling::scope!("destroy_buffers"); for raw in self.buffers.drain(..) { - device.destroy_buffer(raw); + unsafe { device.destroy_buffer(raw) }; } } if !self.textures.is_empty() { profiling::scope!("destroy_textures"); for raw in self.textures.drain(..) 
{ - device.destroy_texture(raw); + unsafe { device.destroy_texture(raw) }; } } if !self.texture_views.is_empty() { profiling::scope!("destroy_texture_views"); for raw in self.texture_views.drain(..) { - device.destroy_texture_view(raw); + unsafe { device.destroy_texture_view(raw) }; } } if !self.samplers.is_empty() { profiling::scope!("destroy_samplers"); for raw in self.samplers.drain(..) { - device.destroy_sampler(raw); + unsafe { device.destroy_sampler(raw) }; } } if !self.bind_groups.is_empty() { profiling::scope!("destroy_bind_groups"); for raw in self.bind_groups.drain(..) { - device.destroy_bind_group(raw); + unsafe { device.destroy_bind_group(raw) }; } } if !self.compute_pipes.is_empty() { profiling::scope!("destroy_compute_pipelines"); for raw in self.compute_pipes.drain(..) { - device.destroy_compute_pipeline(raw); + unsafe { device.destroy_compute_pipeline(raw) }; } } if !self.render_pipes.is_empty() { profiling::scope!("destroy_render_pipelines"); for raw in self.render_pipes.drain(..) { - device.destroy_render_pipeline(raw); + unsafe { device.destroy_render_pipeline(raw) }; } } if !self.bind_group_layouts.is_empty() { profiling::scope!("destroy_bind_group_layouts"); for raw in self.bind_group_layouts.drain(..) { - device.destroy_bind_group_layout(raw); + unsafe { device.destroy_bind_group_layout(raw) }; } } if !self.pipeline_layouts.is_empty() { profiling::scope!("destroy_pipeline_layouts"); for raw in self.pipeline_layouts.drain(..) { - device.destroy_pipeline_layout(raw); + unsafe { device.destroy_pipeline_layout(raw) }; } } if !self.query_sets.is_empty() { profiling::scope!("destroy_query_sets"); for raw in self.query_sets.drain(..) 
{ - device.destroy_query_set(raw); + unsafe { device.destroy_query_set(raw) }; } } } diff --git a/wgpu-core/src/device/mod.rs b/wgpu-core/src/device/mod.rs index 932b58e5ad..7a7e0be4c9 100644 --- a/wgpu-core/src/device/mod.rs +++ b/wgpu-core/src/device/mod.rs @@ -4519,10 +4519,11 @@ impl Global { }); }; - let shader = match device.create_shader_module_spirv(device_id, desc, &source) { - Ok(shader) => shader, - Err(e) => break e, - }; + let shader = + match unsafe { device.create_shader_module_spirv(device_id, desc, &source) } { + Ok(shader) => shader, + Err(e) => break e, + }; let id = fid.assign(shader, &mut token); return (id.0, None); }; diff --git a/wgpu-core/src/device/queue.rs b/wgpu-core/src/device/queue.rs index 57f05592ee..091128cba2 100644 --- a/wgpu-core/src/device/queue.rs +++ b/wgpu-core/src/device/queue.rs @@ -116,7 +116,7 @@ pub(super) struct EncoderInFlight { impl EncoderInFlight { pub(super) unsafe fn land(mut self) -> A::CommandEncoder { - self.raw.reset_all(self.cmd_buffers.into_iter()); + unsafe { self.raw.reset_all(self.cmd_buffers.into_iter()) }; self.raw } } @@ -276,9 +276,9 @@ fn prepare_staging_buffer( impl StagingBuffer { unsafe fn flush(&self, device: &A::Device) -> Result<(), DeviceError> { if !self.is_coherent { - device.flush_mapped_ranges(&self.raw, iter::once(0..self.size)); + unsafe { device.flush_mapped_ranges(&self.raw, iter::once(0..self.size)) }; } - device.unmap_buffer(&self.raw)?; + unsafe { device.unmap_buffer(&self.raw)? 
}; Ok(()) } } diff --git a/wgpu-core/src/instance.rs b/wgpu-core/src/instance.rs index 7d6b94ed51..e237c6bcaf 100644 --- a/wgpu-core/src/instance.rs +++ b/wgpu-core/src/instance.rs @@ -586,7 +586,7 @@ impl Global { #[cfg(vulkan)] vulkan: None, dx12: self.instance.dx12.as_ref().map(|inst| HalSurface { - raw: { inst.create_surface_from_visual(visual as _) }, + raw: unsafe { inst.create_surface_from_visual(visual as _) }, }), dx11: None, #[cfg(gl)] diff --git a/wgpu-core/src/lib.rs b/wgpu-core/src/lib.rs index c13fff0839..ce0f7f5087 100644 --- a/wgpu-core/src/lib.rs +++ b/wgpu-core/src/lib.rs @@ -28,6 +28,7 @@ #![warn( trivial_casts, trivial_numeric_casts, + unsafe_op_in_unsafe_fn, unused_extern_crates, unused_qualifications, // We don't match on a reference, unless required. diff --git a/wgpu-core/src/track/buffer.rs b/wgpu-core/src/track/buffer.rs index c67138eebe..933201dc85 100644 --- a/wgpu-core/src/track/buffer.rs +++ b/wgpu-core/src/track/buffer.rs @@ -147,19 +147,21 @@ impl BufferUsageScope { let (index32, epoch, _) = id.0.unzip(); let index = index32 as usize; - insert_or_merge( - None, - None, - &mut self.state, - &mut self.metadata, - index32, - index, - BufferStateProvider::Direct { state }, - ResourceMetadataProvider::Direct { - epoch, - ref_count: Cow::Borrowed(ref_count), - }, - )?; + unsafe { + insert_or_merge( + None, + None, + &mut self.state, + &mut self.metadata, + index32, + index, + BufferStateProvider::Direct { state }, + ResourceMetadataProvider::Direct { + epoch, + ref_count: Cow::Borrowed(ref_count), + }, + )? 
+ }; } Ok(()) @@ -490,27 +492,29 @@ impl BufferTracker { scope.tracker_assert_in_bounds(index); - if !scope.metadata.owned.get(index).unwrap_unchecked() { + if unsafe { !scope.metadata.owned.get(index).unwrap_unchecked() } { continue; } - insert_or_barrier_update( - None, - Some(&mut self.start), - &mut self.end, - &mut self.metadata, - index as u32, - index, - BufferStateProvider::Indirect { - state: &scope.state, - }, - None, - ResourceMetadataProvider::Indirect { - metadata: &scope.metadata, - }, - &mut self.temp, - ); + unsafe { + insert_or_barrier_update( + None, + Some(&mut self.start), + &mut self.end, + &mut self.metadata, + index as u32, + index, + BufferStateProvider::Indirect { + state: &scope.state, + }, + None, + ResourceMetadataProvider::Indirect { + metadata: &scope.metadata, + }, + &mut self.temp, + ) + }; - scope.metadata.reset(index); + unsafe { scope.metadata.reset(index) }; } } @@ -570,7 +574,7 @@ impl BufferStateProvider<'_> { BufferStateProvider::Direct { state } => state, BufferStateProvider::Indirect { state } => { strict_assert!(index < state.len()); - *state.get_unchecked(index) + *unsafe { state.get_unchecked(index) } } } } @@ -596,29 +600,33 @@ unsafe fn insert_or_merge( state_provider: BufferStateProvider<'_>, metadata_provider: ResourceMetadataProvider<'_, A>, ) -> Result<(), UsageConflict> { - let currently_owned = resource_metadata.owned.get(index).unwrap_unchecked(); + let currently_owned = unsafe { resource_metadata.owned.get(index).unwrap_unchecked() }; if !currently_owned { - insert( - life_guard, - start_states, + unsafe { + insert( + life_guard, + start_states, + current_states, + resource_metadata, + index, + state_provider, + None, + metadata_provider, + ) + }; + return Ok(()); + } + + unsafe { + merge( current_states, - resource_metadata, + index32, index, state_provider, - None, metadata_provider, - ); - return Ok(()); + ) } - - merge( - current_states, - index32, - index, - state_provider, - metadata_provider, - ) } /// If 
the resource isn't tracked @@ -651,32 +659,36 @@ unsafe fn insert_or_barrier_update( metadata_provider: ResourceMetadataProvider<'_, A>, barriers: &mut Vec>, ) { - let currently_owned = resource_metadata.owned.get(index).unwrap_unchecked(); + let currently_owned = unsafe { resource_metadata.owned.get(index).unwrap_unchecked() }; if !currently_owned { - insert( - life_guard, - start_states, - current_states, - resource_metadata, - index, - start_state_provider, - end_state_provider, - metadata_provider, - ); + unsafe { + insert( + life_guard, + start_states, + current_states, + resource_metadata, + index, + start_state_provider, + end_state_provider, + metadata_provider, + ) + }; return; } let update_state_provider = end_state_provider.unwrap_or_else(|| start_state_provider.clone()); - barrier( - current_states, - index32, - index, - start_state_provider, - barriers, - ); - - update(current_states, index, update_state_provider); + unsafe { + barrier( + current_states, + index32, + index, + start_state_provider, + barriers, + ) + }; + + unsafe { update(current_states, index, update_state_provider) }; } #[inline(always)] @@ -690,8 +702,9 @@ unsafe fn insert( end_state_provider: Option>, metadata_provider: ResourceMetadataProvider<'_, A>, ) { - let new_start_state = start_state_provider.get_state(index); - let new_end_state = end_state_provider.map_or(new_start_state, |p| p.get_state(index)); + let new_start_state = unsafe { start_state_provider.get_state(index) }; + let new_end_state = + end_state_provider.map_or(new_start_state, |p| unsafe { p.get_state(index) }); // This should only ever happen with a wgpu bug, but let's just double // check that resource states don't have any conflicts. 
@@ -700,16 +713,19 @@ unsafe fn insert( log::trace!("\tbuf {index}: insert {new_start_state:?}..{new_end_state:?}"); - if let Some(&mut ref mut start_state) = start_states { - *start_state.get_unchecked_mut(index) = new_start_state; - } - *current_states.get_unchecked_mut(index) = new_end_state; + unsafe { + if let Some(&mut ref mut start_state) = start_states { + *start_state.get_unchecked_mut(index) = new_start_state; + } + *current_states.get_unchecked_mut(index) = new_end_state; + + let (epoch, ref_count) = metadata_provider.get_own(life_guard, index); - let (epoch, ref_count) = metadata_provider.get_own(life_guard, index); + resource_metadata.owned.set(index, true); - resource_metadata.owned.set(index, true); - *resource_metadata.epochs.get_unchecked_mut(index) = epoch; - *resource_metadata.ref_counts.get_unchecked_mut(index) = Some(ref_count); + *resource_metadata.epochs.get_unchecked_mut(index) = epoch; + *resource_metadata.ref_counts.get_unchecked_mut(index) = Some(ref_count); + } } #[inline(always)] @@ -720,14 +736,18 @@ unsafe fn merge( state_provider: BufferStateProvider<'_>, metadata_provider: ResourceMetadataProvider<'_, A>, ) -> Result<(), UsageConflict> { - let current_state = current_states.get_unchecked_mut(index); - let new_state = state_provider.get_state(index); + let current_state = unsafe { current_states.get_unchecked_mut(index) }; + let new_state = unsafe { state_provider.get_state(index) }; let merged_state = *current_state | new_state; if invalid_resource_state(merged_state) { return Err(UsageConflict::from_buffer( - BufferId::zip(index32, metadata_provider.get_epoch(index), A::VARIANT), + BufferId::zip( + index32, + unsafe { metadata_provider.get_epoch(index) }, + A::VARIANT, + ), *current_state, new_state, )); @@ -748,8 +768,8 @@ unsafe fn barrier( state_provider: BufferStateProvider<'_>, barriers: &mut Vec>, ) { - let current_state = *current_states.get_unchecked(index); - let new_state = state_provider.get_state(index); + let 
current_state = unsafe { *current_states.get_unchecked(index) }; + let new_state = unsafe { state_provider.get_state(index) }; if skip_barrier(current_state, new_state) { return; @@ -770,8 +790,8 @@ unsafe fn update( index: usize, state_provider: BufferStateProvider<'_>, ) { - let current_state = current_states.get_unchecked_mut(index); - let new_state = state_provider.get_state(index); + let current_state = unsafe { current_states.get_unchecked_mut(index) }; + let new_state = unsafe { state_provider.get_state(index) }; *current_state = new_state; } diff --git a/wgpu-core/src/track/mod.rs b/wgpu-core/src/track/mod.rs index 50ef96c874..6dcc2674b8 100644 --- a/wgpu-core/src/track/mod.rs +++ b/wgpu-core/src/track/mod.rs @@ -409,8 +409,8 @@ impl ResourceMetadata { /// Resets the metadata for a given index to sane "invalid" values. unsafe fn reset(&mut self, index: usize) { - *self.ref_counts.get_unchecked_mut(index) = None; - *self.epochs.get_unchecked_mut(index) = u32::MAX; + unsafe { *self.ref_counts.get_unchecked_mut(index) = None }; + unsafe { *self.epochs.get_unchecked_mut(index) = u32::MAX }; self.owned.set(index, false); } } @@ -445,18 +445,17 @@ impl ResourceMetadataProvider<'_, A> { } ResourceMetadataProvider::Indirect { metadata } => { metadata.tracker_assert_in_bounds(index); - ( - *metadata.epochs.get_unchecked(index), + (unsafe { *metadata.epochs.get_unchecked(index) }, unsafe { metadata .ref_counts .get_unchecked(index) .clone() - .unwrap_unchecked(), - ) + .unwrap_unchecked() + }) } ResourceMetadataProvider::Resource { epoch } => { strict_assert!(life_guard.is_some()); - (epoch, life_guard.unwrap_unchecked().add_ref()) + (epoch, unsafe { life_guard.unwrap_unchecked() }.add_ref()) } } } @@ -472,7 +471,7 @@ impl ResourceMetadataProvider<'_, A> { | ResourceMetadataProvider::Resource { epoch, .. 
} => epoch, ResourceMetadataProvider::Indirect { metadata } => { metadata.tracker_assert_in_bounds(index); - *metadata.epochs.get_unchecked(index) + unsafe { *metadata.epochs.get_unchecked(index) } } } } @@ -564,9 +563,11 @@ impl RenderBundleScope { textures: &hub::Storage, id::TextureId>, bind_group: &BindGroupStates, ) -> Result<(), UsageConflict> { - self.buffers.merge_bind_group(&bind_group.buffers)?; - self.textures - .merge_bind_group(textures, &bind_group.textures)?; + unsafe { self.buffers.merge_bind_group(&bind_group.buffers)? }; + unsafe { + self.textures + .merge_bind_group(textures, &bind_group.textures)? + }; Ok(()) } @@ -611,9 +612,11 @@ impl UsageScope { textures: &hub::Storage, id::TextureId>, bind_group: &BindGroupStates, ) -> Result<(), UsageConflict> { - self.buffers.merge_bind_group(&bind_group.buffers)?; - self.textures - .merge_bind_group(textures, &bind_group.textures)?; + unsafe { + self.buffers.merge_bind_group(&bind_group.buffers)?; + self.textures + .merge_bind_group(textures, &bind_group.textures)?; + } Ok(()) } @@ -740,13 +743,19 @@ impl Tracker { scope: &mut UsageScope, bind_group: &BindGroupStates, ) { - self.buffers - .set_and_remove_from_usage_scope_sparse(&mut scope.buffers, bind_group.buffers.used()); - self.textures.set_and_remove_from_usage_scope_sparse( - textures, - &mut scope.textures, - &bind_group.textures, - ); + unsafe { + self.buffers.set_and_remove_from_usage_scope_sparse( + &mut scope.buffers, + bind_group.buffers.used(), + ) + }; + unsafe { + self.textures.set_and_remove_from_usage_scope_sparse( + textures, + &mut scope.textures, + &bind_group.textures, + ) + }; } /// Tracks the stateless resources from the given renderbundle. 
It is expected diff --git a/wgpu-core/src/track/texture.rs b/wgpu-core/src/track/texture.rs index ea25011bc4..d5f88bdee8 100644 --- a/wgpu-core/src/track/texture.rs +++ b/wgpu-core/src/track/texture.rs @@ -114,7 +114,7 @@ impl ComplexTextureState { strict_assert_eq!(invalid_resource_state(desired_state), false); let mips = selector.mips.start as usize..selector.mips.end as usize; - for mip in complex.mips.get_unchecked_mut(mips) { + for mip in unsafe { complex.mips.get_unchecked_mut(mips) } { for &mut (_, ref mut state) in mip.isolate(&selector.layers, TextureUses::UNKNOWN) { *state = desired_state; } @@ -327,7 +327,7 @@ impl TextureUsageScope { bind_group: &TextureBindGroupState, ) -> Result<(), UsageConflict> { for &(id, ref selector, ref ref_count, state) in &bind_group.textures { - self.merge_single(storage, id, selector.clone(), ref_count, state)?; + unsafe { self.merge_single(storage, id, selector.clone(), ref_count, state)? }; } Ok(()) @@ -359,18 +359,20 @@ impl TextureUsageScope { self.tracker_assert_in_bounds(index); - insert_or_merge( - texture_data_from_texture(storage, index32), - &mut self.set, - &mut self.metadata, - index32, - index, - TextureStateProvider::from_option(selector, new_state), - ResourceMetadataProvider::Direct { - epoch, - ref_count: Cow::Borrowed(ref_count), - }, - )?; + unsafe { + insert_or_merge( + texture_data_from_texture(storage, index32), + &mut self.set, + &mut self.metadata, + index32, + index, + TextureStateProvider::from_option(selector, new_state), + ResourceMetadataProvider::Direct { + epoch, + ref_count: Cow::Borrowed(ref_count), + }, + )? + }; Ok(()) } @@ -465,11 +467,13 @@ impl TextureTracker { self.tracker_assert_in_bounds(index); - self.metadata - .ref_counts - .get_unchecked(index) - .as_ref() - .unwrap_unchecked() + unsafe { + self.metadata + .ref_counts + .get_unchecked(index) + .as_ref() + .unwrap_unchecked() + } } /// Inserts a single texture and a state into the resource tracker. 
@@ -676,25 +680,27 @@ impl TextureTracker { let index = index32 as usize; scope.tracker_assert_in_bounds(index); - if !scope.metadata.owned.get(index).unwrap_unchecked() { + if unsafe { !scope.metadata.owned.get(index).unwrap_unchecked() } { continue; } - insert_or_barrier_update( - texture_data_from_texture(storage, index32), - Some(&mut self.start_set), - &mut self.end_set, - &mut self.metadata, - index32, - index, - TextureStateProvider::TextureSet { set: &scope.set }, - None, - ResourceMetadataProvider::Indirect { - metadata: &scope.metadata, - }, - &mut self.temp, - ); + unsafe { + insert_or_barrier_update( + texture_data_from_texture(storage, index32), + Some(&mut self.start_set), + &mut self.end_set, + &mut self.metadata, + index32, + index, + TextureStateProvider::TextureSet { set: &scope.set }, + None, + ResourceMetadataProvider::Indirect { + metadata: &scope.metadata, + }, + &mut self.temp, + ) + }; - scope.metadata.reset(index); + unsafe { scope.metadata.reset(index) }; } } @@ -856,10 +862,10 @@ impl<'a> TextureStateProvider<'a> { } } TextureStateProvider::TextureSet { set } => { - let new_state = *set.simple.get_unchecked(index); + let new_state = *unsafe { set.simple.get_unchecked(index) }; if new_state == TextureUses::COMPLEX { - let new_complex = set.complex.get(&index32).unwrap_unchecked(); + let new_complex = unsafe { set.complex.get(&index32).unwrap_unchecked() }; SingleOrManyStates::Many(EitherIter::Right( new_complex.to_selector_state_iter(), @@ -879,7 +885,7 @@ unsafe fn texture_data_from_texture( storage: &hub::Storage, TextureId>, index32: u32, ) -> (&LifeGuard, &TextureSelector) { - let texture = storage.get_unchecked(index32); + let texture = unsafe { storage.get_unchecked(index32) }; (&texture.life_guard, &texture.full_range) } @@ -902,31 +908,35 @@ unsafe fn insert_or_merge( state_provider: TextureStateProvider<'_>, metadata_provider: ResourceMetadataProvider<'_, A>, ) -> Result<(), UsageConflict> { - let currently_owned = 
resource_metadata.owned.get(index).unwrap_unchecked(); + let currently_owned = unsafe { resource_metadata.owned.get(index).unwrap_unchecked() }; if !currently_owned { - insert( - Some(texture_data), - None, + unsafe { + insert( + Some(texture_data), + None, + current_state_set, + resource_metadata, + index32, + index, + state_provider, + None, + metadata_provider, + ) + }; + return Ok(()); + } + + unsafe { + merge( + texture_data, current_state_set, - resource_metadata, index32, index, state_provider, - None, metadata_provider, - ); - return Ok(()); + ) } - - merge( - texture_data, - current_state_set, - index32, - index, - state_provider, - metadata_provider, - ) } /// If the resource isn't tracked @@ -959,42 +969,48 @@ unsafe fn insert_or_barrier_update( metadata_provider: ResourceMetadataProvider<'_, A>, barriers: &mut Vec>, ) { - let currently_owned = resource_metadata.owned.get(index).unwrap_unchecked(); + let currently_owned = unsafe { resource_metadata.owned.get(index).unwrap_unchecked() }; if !currently_owned { - insert( - Some(texture_data), - start_state, - current_state_set, - resource_metadata, - index32, - index, - start_state_provider, - end_state_provider, - metadata_provider, - ); + unsafe { + insert( + Some(texture_data), + start_state, + current_state_set, + resource_metadata, + index32, + index, + start_state_provider, + end_state_provider, + metadata_provider, + ) + }; return; } let update_state_provider = end_state_provider.unwrap_or_else(|| start_state_provider.clone()); - barrier( - texture_data, - current_state_set, - index32, - index, - start_state_provider, - barriers, - ); + unsafe { + barrier( + texture_data, + current_state_set, + index32, + index, + start_state_provider, + barriers, + ) + }; let start_state_set = start_state.unwrap(); - update( - texture_data, - start_state_set, - current_state_set, - index32, - index, - update_state_provider, - ); + unsafe { + update( + texture_data, + start_state_set, + current_state_set, + index32, 
+ index, + update_state_provider, + ) + }; } #[inline(always)] @@ -1009,7 +1025,7 @@ unsafe fn insert( end_state_provider: Option>, metadata_provider: ResourceMetadataProvider<'_, A>, ) { - let start_layers = start_state_provider.get_state(texture_data, index32, index); + let start_layers = unsafe { start_state_provider.get_state(texture_data, index32, index) }; match start_layers { SingleOrManyStates::Single(state) => { // This should only ever happen with a wgpu bug, but let's just double @@ -1019,36 +1035,37 @@ unsafe fn insert( log::trace!("\ttex {index32}: insert start {state:?}"); if let Some(start_state) = start_state { - *start_state.simple.get_unchecked_mut(index) = state; + unsafe { *start_state.simple.get_unchecked_mut(index) = state }; } // We only need to insert ourselves the end state if there is no end state provider. if end_state_provider.is_none() { - *end_state.simple.get_unchecked_mut(index) = state; + unsafe { *end_state.simple.get_unchecked_mut(index) = state }; } } SingleOrManyStates::Many(state_iter) => { let full_range = texture_data.unwrap().1.clone(); - let complex = ComplexTextureState::from_selector_state_iter(full_range, state_iter); + let complex = + unsafe { ComplexTextureState::from_selector_state_iter(full_range, state_iter) }; log::trace!("\ttex {index32}: insert start {complex:?}"); if let Some(start_state) = start_state { - *start_state.simple.get_unchecked_mut(index) = TextureUses::COMPLEX; + unsafe { *start_state.simple.get_unchecked_mut(index) = TextureUses::COMPLEX }; start_state.complex.insert(index32, complex.clone()); } // We only need to insert ourselves the end state if there is no end state provider. 
if end_state_provider.is_none() { - *end_state.simple.get_unchecked_mut(index) = TextureUses::COMPLEX; + unsafe { *end_state.simple.get_unchecked_mut(index) = TextureUses::COMPLEX }; end_state.complex.insert(index32, complex); } } } if let Some(end_state_provider) = end_state_provider { - match end_state_provider.get_state(texture_data, index32, index) { + match unsafe { end_state_provider.get_state(texture_data, index32, index) } { SingleOrManyStates::Single(state) => { // This should only ever happen with a wgpu bug, but let's just double // check that resource states don't have any conflicts. @@ -1058,29 +1075,31 @@ unsafe fn insert( // We only need to insert into the end, as there is guarenteed to be // a start state provider. - *end_state.simple.get_unchecked_mut(index) = state; + unsafe { *end_state.simple.get_unchecked_mut(index) = state }; } SingleOrManyStates::Many(state_iter) => { let full_range = texture_data.unwrap().1.clone(); - let complex = ComplexTextureState::from_selector_state_iter(full_range, state_iter); + let complex = unsafe { + ComplexTextureState::from_selector_state_iter(full_range, state_iter) + }; log::trace!("\ttex {index32}: insert end {complex:?}"); // We only need to insert into the end, as there is guarenteed to be // a start state provider. 
- *end_state.simple.get_unchecked_mut(index) = TextureUses::COMPLEX; + unsafe { *end_state.simple.get_unchecked_mut(index) = TextureUses::COMPLEX }; end_state.complex.insert(index32, complex); } } } let (epoch, ref_count) = - metadata_provider.get_own(texture_data.map(|(life_guard, _)| life_guard), index); + unsafe { metadata_provider.get_own(texture_data.map(|(life_guard, _)| life_guard), index) }; resource_metadata.owned.set(index, true); - *resource_metadata.epochs.get_unchecked_mut(index) = epoch; - *resource_metadata.ref_counts.get_unchecked_mut(index) = Some(ref_count); + unsafe { *resource_metadata.epochs.get_unchecked_mut(index) = epoch }; + unsafe { *resource_metadata.ref_counts.get_unchecked_mut(index) = Some(ref_count) }; } #[inline(always)] @@ -1092,19 +1111,19 @@ unsafe fn merge( state_provider: TextureStateProvider<'_>, metadata_provider: ResourceMetadataProvider<'_, A>, ) -> Result<(), UsageConflict> { - let current_simple = current_state_set.simple.get_unchecked_mut(index); + let current_simple = unsafe { current_state_set.simple.get_unchecked_mut(index) }; let current_state = if *current_simple == TextureUses::COMPLEX { - SingleOrManyStates::Many( + SingleOrManyStates::Many(unsafe { current_state_set .complex .get_mut(&index32) - .unwrap_unchecked(), - ) + .unwrap_unchecked() + }) } else { SingleOrManyStates::Single(current_simple) }; - let new_state = state_provider.get_state(Some(texture_data), index32, index); + let new_state = unsafe { state_provider.get_state(Some(texture_data), index32, index) }; match (current_state, new_state) { (SingleOrManyStates::Single(current_simple), SingleOrManyStates::Single(new_simple)) => { @@ -1114,7 +1133,11 @@ unsafe fn merge( if invalid_resource_state(merged_state) { return Err(UsageConflict::from_texture( - TextureId::zip(index32, metadata_provider.get_epoch(index), A::VARIANT), + TextureId::zip( + index32, + unsafe { metadata_provider.get_epoch(index) }, + A::VARIANT, + ), texture_data.1.clone(), 
*current_simple, new_simple, @@ -1127,10 +1150,12 @@ unsafe fn merge( // Because we are now demoting this simple state to a complex state, // we actually need to make a whole new complex state for us to use // as there wasn't one before. - let mut new_complex = ComplexTextureState::from_selector_state_iter( - texture_data.1.clone(), - iter::once((texture_data.1.clone(), *current_simple)), - ); + let mut new_complex = unsafe { + ComplexTextureState::from_selector_state_iter( + texture_data.1.clone(), + iter::once((texture_data.1.clone(), *current_simple)), + ) + }; for (selector, new_state) in new_many { let merged_state = *current_simple | new_state; @@ -1141,7 +1166,11 @@ unsafe fn merge( if invalid_resource_state(merged_state) { return Err(UsageConflict::from_texture( - TextureId::zip(index32, metadata_provider.get_epoch(index), A::VARIANT), + TextureId::zip( + index32, + unsafe { metadata_provider.get_epoch(index) }, + A::VARIANT, + ), selector, *current_simple, new_state, @@ -1182,7 +1211,11 @@ unsafe fn merge( if invalid_resource_state(merged_state) { return Err(UsageConflict::from_texture( - TextureId::zip(index32, metadata_provider.get_epoch(index), A::VARIANT), + TextureId::zip( + index32, + unsafe { metadata_provider.get_epoch(index) }, + A::VARIANT, + ), TextureSelector { mips: mip_id..mip_id + 1, layers: layers.clone(), @@ -1203,7 +1236,7 @@ unsafe fn merge( for mip_id in selector.mips { strict_assert!((mip_id as usize) < current_complex.mips.len()); - let mip = current_complex.mips.get_unchecked_mut(mip_id as usize); + let mip = unsafe { current_complex.mips.get_unchecked_mut(mip_id as usize) }; for &mut (ref layers, ref mut current_layer_state) in mip.isolate(&selector.layers, TextureUses::UNKNOWN) @@ -1225,7 +1258,7 @@ unsafe fn merge( return Err(UsageConflict::from_texture( TextureId::zip( index32, - metadata_provider.get_epoch(index), + unsafe { metadata_provider.get_epoch(index) }, A::VARIANT, ), TextureSelector { @@ -1256,14 +1289,16 @@ unsafe fn 
barrier( state_provider: TextureStateProvider<'_>, barriers: &mut Vec>, ) { - let current_simple = *current_state_set.simple.get_unchecked(index); + let current_simple = unsafe { *current_state_set.simple.get_unchecked(index) }; let current_state = if current_simple == TextureUses::COMPLEX { - SingleOrManyStates::Many(current_state_set.complex.get(&index32).unwrap_unchecked()) + SingleOrManyStates::Many(unsafe { + current_state_set.complex.get(&index32).unwrap_unchecked() + }) } else { SingleOrManyStates::Single(current_simple) }; - let new_state = state_provider.get_state(Some(texture_data), index32, index); + let new_state = unsafe { state_provider.get_state(Some(texture_data), index32, index) }; match (current_state, new_state) { (SingleOrManyStates::Single(current_simple), SingleOrManyStates::Single(new_simple)) => { @@ -1334,7 +1369,7 @@ unsafe fn barrier( for mip_id in selector.mips { strict_assert!((mip_id as usize) < current_complex.mips.len()); - let mip = current_complex.mips.get_unchecked(mip_id as usize); + let mip = unsafe { current_complex.mips.get_unchecked(mip_id as usize) }; for (layers, current_layer_state) in mip.iter_filter(&selector.layers) { if *current_layer_state == TextureUses::UNKNOWN @@ -1377,29 +1412,30 @@ unsafe fn update( index: usize, state_provider: TextureStateProvider<'_>, ) { - let start_simple = *start_state_set.simple.get_unchecked(index); + let start_simple = unsafe { *start_state_set.simple.get_unchecked(index) }; // We only ever need to update the start state here if the state is complex. // // If the state is simple, the first insert to the tracker would cover it. 
let mut start_complex = None; if start_simple == TextureUses::COMPLEX { - start_complex = Some(start_state_set.complex.get_mut(&index32).unwrap_unchecked()); + start_complex = + Some(unsafe { start_state_set.complex.get_mut(&index32).unwrap_unchecked() }); } - let current_simple = current_state_set.simple.get_unchecked_mut(index); + let current_simple = unsafe { current_state_set.simple.get_unchecked_mut(index) }; let current_state = if *current_simple == TextureUses::COMPLEX { - SingleOrManyStates::Many( + SingleOrManyStates::Many(unsafe { current_state_set .complex .get_mut(&index32) - .unwrap_unchecked(), - ) + .unwrap_unchecked() + }) } else { SingleOrManyStates::Single(current_simple) }; - let new_state = state_provider.get_state(Some(texture_data), index32, index); + let new_state = unsafe { state_provider.get_state(Some(texture_data), index32, index) }; match (current_state, new_state) { (SingleOrManyStates::Single(current_simple), SingleOrManyStates::Single(new_simple)) => { @@ -1409,10 +1445,12 @@ unsafe fn update( // Because we are now demoting this simple state to a complex state, // we actually need to make a whole new complex state for us to use // as there wasn't one before. 
- let mut new_complex = ComplexTextureState::from_selector_state_iter( - texture_data.1.clone(), - iter::once((texture_data.1.clone(), *current_simple)), - ); + let mut new_complex = unsafe { + ComplexTextureState::from_selector_state_iter( + texture_data.1.clone(), + iter::once((texture_data.1.clone(), *current_simple)), + ) + }; for (selector, mut new_state) in new_many { if new_state == TextureUses::UNKNOWN { @@ -1442,7 +1480,7 @@ unsafe fn update( if let Some(&mut ref mut start_complex) = start_complex { strict_assert!(mip_id < start_complex.mips.len()); - let start_mip = start_complex.mips.get_unchecked_mut(mip_id); + let start_mip = unsafe { start_complex.mips.get_unchecked_mut(mip_id) }; for &mut (_, ref mut current_start_state) in start_mip.isolate(layers, TextureUses::UNKNOWN) @@ -1457,11 +1495,13 @@ unsafe fn update( } } - *current_state_set.simple.get_unchecked_mut(index) = new_single; - current_state_set - .complex - .remove(&index32) - .unwrap_unchecked(); + unsafe { *current_state_set.simple.get_unchecked_mut(index) = new_single }; + unsafe { + current_state_set + .complex + .remove(&index32) + .unwrap_unchecked() + }; } (SingleOrManyStates::Many(current_complex), SingleOrManyStates::Many(new_many)) => { for (selector, new_state) in new_many { @@ -1474,7 +1514,7 @@ unsafe fn update( let mip_id = mip_id as usize; strict_assert!(mip_id < current_complex.mips.len()); - let mip = current_complex.mips.get_unchecked_mut(mip_id); + let mip = unsafe { current_complex.mips.get_unchecked_mut(mip_id) }; for &mut (ref layers, ref mut current_layer_state) in mip.isolate(&selector.layers, TextureUses::UNKNOWN) @@ -1490,11 +1530,12 @@ unsafe fn update( // otherwise we would know about this state. 
strict_assert!(start_complex.is_some()); - let start_complex = start_complex.as_deref_mut().unwrap_unchecked(); + let start_complex = + unsafe { start_complex.as_deref_mut().unwrap_unchecked() }; strict_assert!(mip_id < start_complex.mips.len()); - let start_mip = start_complex.mips.get_unchecked_mut(mip_id); + let start_mip = unsafe { start_complex.mips.get_unchecked_mut(mip_id) }; for &mut (_, ref mut current_start_state) in start_mip.isolate(layers, TextureUses::UNKNOWN) From 02cdc8557732c3f94af3cfa9baf5d5c46b861a3a Mon Sep 17 00:00:00 2001 From: Erich Gubler Date: Sun, 9 Oct 2022 10:52:56 -0400 Subject: [PATCH 4/6] chore: naively set `warn(unsafe_op_in_unsafe_fn)` in `wgpu-hal` Do the simplest mechanical work necessary to enable and satisfy this lint; only put down `unsafe` blocks, don't try to inspect for correctness or anything else. N.B.: that there _are_ some adjustments identified that could be made here, like breaking multiple individual `unsafe` operations into their `unsafe` spans. This is left for a follow-up commit. 
--- wgpu-hal/Cargo.toml | 2 +- wgpu-hal/src/auxil/dxgi/exception.rs | 20 +- wgpu-hal/src/auxil/renderdoc.rs | 34 +- wgpu-hal/src/dx11/device.rs | 20 +- wgpu-hal/src/dx12/adapter.rs | 54 +- wgpu-hal/src/dx12/command.rs | 460 +++++----- wgpu-hal/src/dx12/descriptor.rs | 26 +- wgpu-hal/src/dx12/device.rs | 332 ++++---- wgpu-hal/src/dx12/instance.rs | 14 +- wgpu-hal/src/dx12/mod.rs | 95 ++- wgpu-hal/src/dx12/view.rs | 224 +++-- wgpu-hal/src/gles/adapter.rs | 157 ++-- wgpu-hal/src/gles/device.rs | 358 ++++---- wgpu-hal/src/gles/egl.rs | 211 ++--- wgpu-hal/src/gles/queue.rs | 1166 ++++++++++++++------------ wgpu-hal/src/gles/web.rs | 182 ++-- wgpu-hal/src/lib.rs | 1 + wgpu-hal/src/metal/device.rs | 2 +- wgpu-hal/src/metal/mod.rs | 11 +- wgpu-hal/src/metal/surface.rs | 7 +- wgpu-hal/src/vulkan/adapter.rs | 88 +- wgpu-hal/src/vulkan/command.rs | 493 ++++++----- wgpu-hal/src/vulkan/device.rs | 440 +++++----- wgpu-hal/src/vulkan/instance.rs | 117 ++- wgpu-hal/src/vulkan/mod.rs | 25 +- 25 files changed, 2527 insertions(+), 2012 deletions(-) diff --git a/wgpu-hal/Cargo.toml b/wgpu-hal/Cargo.toml index 5a0ce3afba..79c5832219 100644 --- a/wgpu-hal/Cargo.toml +++ b/wgpu-hal/Cargo.toml @@ -22,7 +22,7 @@ rustdoc-args = ["--cfg", "docsrs"] [lib] [features] -default = [] +default = ["gles"] metal = ["naga/msl-out", "block", "foreign-types"] vulkan = ["naga/spv-out", "ash", "gpu-alloc", "gpu-descriptor", "libloading", "smallvec"] gles = ["naga/glsl-out", "glow", "egl", "libloading"] diff --git a/wgpu-hal/src/auxil/dxgi/exception.rs b/wgpu-hal/src/auxil/dxgi/exception.rs index 31d5e6933a..fceac7db5f 100644 --- a/wgpu-hal/src/auxil/dxgi/exception.rs +++ b/wgpu-hal/src/auxil/dxgi/exception.rs @@ -46,21 +46,23 @@ unsafe extern "system" fn output_debug_string_handler( exception_info: *mut winnt::EXCEPTION_POINTERS, ) -> i32 { // See https://stackoverflow.com/a/41480827 - let record = &*(*exception_info).ExceptionRecord; + let record = unsafe { &*(*exception_info).ExceptionRecord }; if 
record.NumberParameters != 2 { return excpt::EXCEPTION_CONTINUE_SEARCH; } let message = match record.ExceptionCode { - winnt::DBG_PRINTEXCEPTION_C => String::from_utf8_lossy(slice::from_raw_parts( - record.ExceptionInformation[1] as *const u8, - record.ExceptionInformation[0], - )), - winnt::DBG_PRINTEXCEPTION_WIDE_C => { - Cow::Owned(String::from_utf16_lossy(slice::from_raw_parts( + winnt::DBG_PRINTEXCEPTION_C => String::from_utf8_lossy(unsafe { + slice::from_raw_parts( + record.ExceptionInformation[1] as *const u8, + record.ExceptionInformation[0], + ) + }), + winnt::DBG_PRINTEXCEPTION_WIDE_C => Cow::Owned(String::from_utf16_lossy(unsafe { + slice::from_raw_parts( record.ExceptionInformation[1] as *const u16, record.ExceptionInformation[0], - ))) - } + ) + })), _ => return excpt::EXCEPTION_CONTINUE_SEARCH, }; diff --git a/wgpu-hal/src/auxil/renderdoc.rs b/wgpu-hal/src/auxil/renderdoc.rs index 712eac4180..b2e9242a89 100644 --- a/wgpu-hal/src/auxil/renderdoc.rs +++ b/wgpu-hal/src/auxil/renderdoc.rs @@ -44,12 +44,13 @@ impl RenderDoc { let renderdoc_filename = "libVkLayer_GLES_RenderDoc.so"; #[cfg(unix)] - let renderdoc_result: Result = + let renderdoc_result: Result = unsafe { libloading::os::unix::Library::open( Some(renderdoc_filename), libloading::os::unix::RTLD_NOW | RTLD_NOLOAD, ) - .map(|lib| lib.into()); + } + .map(|lib| lib.into()); #[cfg(windows)] let renderdoc_result: Result = @@ -68,22 +69,23 @@ impl RenderDoc { } }; - let get_api: libloading::Symbol = match renderdoc_lib.get(b"RENDERDOC_GetAPI\0") { - Ok(api) => api, - Err(e) => { - return RenderDoc::NotAvailable { - reason: format!( - "Unable to get RENDERDOC_GetAPI from renderdoc library '{}': {:?}", - renderdoc_filename, e - ), + let get_api: libloading::Symbol = + match unsafe { renderdoc_lib.get(b"RENDERDOC_GetAPI\0") } { + Ok(api) => api, + Err(e) => { + return RenderDoc::NotAvailable { + reason: format!( + "Unable to get RENDERDOC_GetAPI from renderdoc library '{}': {:?}", + renderdoc_filename, e 
+ ), + } } - } - }; + }; let mut obj = ptr::null_mut(); - match get_api(10401, &mut obj) { + match unsafe { get_api(10401, &mut obj) } { 1 => RenderDoc::Available { api: RenderDocApi { - api: *(obj as *mut renderdoc_sys::RENDERDOC_API_1_4_1), + api: unsafe { *(obj as *mut renderdoc_sys::RENDERDOC_API_1_4_1) }, lib: renderdoc_lib, }, }, @@ -115,7 +117,7 @@ impl RenderDoc { pub unsafe fn start_frame_capture(&self, device_handle: Handle, window_handle: Handle) -> bool { match *self { Self::Available { api: ref entry } => { - entry.api.StartFrameCapture.unwrap()(device_handle, window_handle); + unsafe { entry.api.StartFrameCapture.unwrap()(device_handle, window_handle) }; true } Self::NotAvailable { ref reason } => { @@ -129,7 +131,7 @@ impl RenderDoc { pub unsafe fn end_frame_capture(&self, device_handle: Handle, window_handle: Handle) { match *self { Self::Available { api: ref entry } => { - entry.api.EndFrameCapture.unwrap()(device_handle, window_handle); + unsafe { entry.api.EndFrameCapture.unwrap()(device_handle, window_handle) }; } Self::NotAvailable { ref reason } => { log::warn!("Could not end RenderDoc frame capture: {}", reason) diff --git a/wgpu-hal/src/dx11/device.rs b/wgpu-hal/src/dx11/device.rs index 7b095ba1df..3b087c4311 100644 --- a/wgpu-hal/src/dx11/device.rs +++ b/wgpu-hal/src/dx11/device.rs @@ -227,14 +227,16 @@ impl crate::Queue for super::Queue { impl super::D3D11Device { #[allow(trivial_casts)] // come on pub unsafe fn check_feature_support(&self, feature: d3d11::D3D11_FEATURE) -> T { - let mut value = mem::zeroed::(); - let ret = self.CheckFeatureSupport( - feature, - &mut value as *mut T as *mut c_void, - mem::size_of::() as u32, - ); - assert_eq!(ret.into_result(), Ok(())); - - value + unsafe { + let mut value = mem::zeroed::(); + let ret = self.CheckFeatureSupport( + feature, + &mut value as *mut T as *mut c_void, + mem::size_of::() as u32, + ); + assert_eq!(ret.into_result(), Ok(())); + + value + } } } diff --git 
a/wgpu-hal/src/dx12/adapter.rs b/wgpu-hal/src/dx12/adapter.rs index dbc6c987a5..236fec49c3 100644 --- a/wgpu-hal/src/dx12/adapter.rs +++ b/wgpu-hal/src/dx12/adapter.rs @@ -29,15 +29,17 @@ impl Drop for super::Adapter { impl super::Adapter { pub unsafe fn report_live_objects(&self) { - if let Ok(debug_device) = self - .raw - .cast::() - .into_result() - { - debug_device.ReportLiveDeviceObjects( - d3d12sdklayers::D3D12_RLDO_SUMMARY | d3d12sdklayers::D3D12_RLDO_IGNORE_INTERNAL, - ); - debug_device.destroy(); + if let Ok(debug_device) = unsafe { + self.raw + .cast::() + .into_result() + } { + unsafe { + debug_device.ReportLiveDeviceObjects( + d3d12sdklayers::D3D12_RLDO_SUMMARY | d3d12sdklayers::D3D12_RLDO_IGNORE_INTERNAL, + ) + }; + unsafe { debug_device.destroy() }; } } @@ -365,35 +367,33 @@ impl crate::Adapter for super::Adapter { let mut data = d3d12::D3D12_FEATURE_DATA_FORMAT_SUPPORT { Format: raw_format, - Support1: mem::zeroed(), - Support2: mem::zeroed(), + Support1: unsafe { mem::zeroed() }, + Support2: unsafe { mem::zeroed() }, }; - assert_eq!( - winerror::S_OK, + assert_eq!(winerror::S_OK, unsafe { self.device.CheckFeatureSupport( d3d12::D3D12_FEATURE_FORMAT_SUPPORT, &mut data as *mut _ as *mut _, mem::size_of::() as _, ) - ); + }); // Because we use a different format for SRV and UAV views of depth textures, we need to check // the features that use SRV/UAVs using the no-depth format. let mut data_no_depth = d3d12::D3D12_FEATURE_DATA_FORMAT_SUPPORT { Format: no_depth_format, - Support1: mem::zeroed(), - Support2: mem::zeroed(), + Support1: unsafe { mem::zeroed() }, + Support2: unsafe { mem::zeroed() }, }; if raw_format != no_depth_format { // Only-recheck if we're using a different format - assert_eq!( - winerror::S_OK, + assert_eq!(winerror::S_OK, unsafe { self.device.CheckFeatureSupport( d3d12::D3D12_FEATURE_FORMAT_SUPPORT, &mut data_no_depth as *mut _ as *mut _, mem::size_of::() as _, ) - ); + }); } else { // Same format, just copy over. 
data_no_depth = data; @@ -462,11 +462,13 @@ impl crate::Adapter for super::Adapter { let mut set_sample_count = |sc: u32, tfc: Tfc| { ms_levels.SampleCount = sc; - if self.device.CheckFeatureSupport( - d3d12::D3D12_FEATURE_MULTISAMPLE_QUALITY_LEVELS, - <*mut _>::cast(&mut ms_levels), - mem::size_of::() as _, - ) == winerror::S_OK + if unsafe { + self.device.CheckFeatureSupport( + d3d12::D3D12_FEATURE_MULTISAMPLE_QUALITY_LEVELS, + <*mut _>::cast(&mut ms_levels), + mem::size_of::() as _, + ) + } == winerror::S_OK && ms_levels.NumQualityLevels != 0 { caps.set(tfc, !no_msaa_load && !no_msaa_target); @@ -487,8 +489,8 @@ impl crate::Adapter for super::Adapter { let current_extent = { match surface.target { SurfaceTarget::WndHandle(wnd_handle) => { - let mut rect: windef::RECT = mem::zeroed(); - if winuser::GetClientRect(wnd_handle, &mut rect) != 0 { + let mut rect: windef::RECT = unsafe { mem::zeroed() }; + if unsafe { winuser::GetClientRect(wnd_handle, &mut rect) } != 0 { Some(wgt::Extent3d { width: (rect.right - rect.left) as u32, height: (rect.bottom - rect.top) as u32, diff --git a/wgpu-hal/src/dx12/command.rs b/wgpu-hal/src/dx12/command.rs index daeaa96e11..9f879e8b63 100644 --- a/wgpu-hal/src/dx12/command.rs +++ b/wgpu-hal/src/dx12/command.rs @@ -64,7 +64,7 @@ impl super::CommandEncoder { self.pass.kind = kind; if let Some(label) = label { let (wide_label, size) = self.temp.prepare_marker(label); - list.BeginEvent(0, wide_label.as_ptr() as *const _, size); + unsafe { list.BeginEvent(0, wide_label.as_ptr() as *const _, size) }; self.pass.has_label = true; } self.pass.dirty_root_elements = 0; @@ -76,7 +76,7 @@ impl super::CommandEncoder { let list = self.list.unwrap(); list.set_descriptor_heaps(&[]); if self.pass.has_label { - list.EndEvent(); + unsafe { list.EndEvent() }; } self.pass.clear(); } @@ -86,11 +86,13 @@ impl super::CommandEncoder { let list = self.list.unwrap(); let index = self.pass.dirty_vertex_buffers.trailing_zeros(); self.pass.dirty_vertex_buffers ^= 
1 << index; - list.IASetVertexBuffers( - index, - 1, - self.pass.vertex_buffers.as_ptr().offset(index as isize), - ); + unsafe { + list.IASetVertexBuffers( + index, + 1, + self.pass.vertex_buffers.as_ptr().offset(index as isize), + ); + } } if let Some(root_index) = self.pass.layout.special_constants_root_index { let needs_update = match self.pass.root_elements[root_index as usize] { @@ -244,7 +246,7 @@ impl crate::CommandEncoder for super::CommandEncoder { if let Some(label) = label { let cwstr = conv::map_label(label); - list.SetName(cwstr.as_ptr()); + unsafe { list.SetName(cwstr.as_ptr()) }; } self.list = Some(list); @@ -290,32 +292,38 @@ impl crate::CommandEncoder for super::CommandEncoder { let mut raw = d3d12::D3D12_RESOURCE_BARRIER { Type: d3d12::D3D12_RESOURCE_BARRIER_TYPE_TRANSITION, Flags: d3d12::D3D12_RESOURCE_BARRIER_FLAG_NONE, - u: mem::zeroed(), + u: unsafe { mem::zeroed() }, }; - *raw.u.Transition_mut() = d3d12::D3D12_RESOURCE_TRANSITION_BARRIER { - pResource: barrier.buffer.resource.as_mut_ptr(), - Subresource: d3d12::D3D12_RESOURCE_BARRIER_ALL_SUBRESOURCES, - StateBefore: s0, - StateAfter: s1, + unsafe { + *raw.u.Transition_mut() = d3d12::D3D12_RESOURCE_TRANSITION_BARRIER { + pResource: barrier.buffer.resource.as_mut_ptr(), + Subresource: d3d12::D3D12_RESOURCE_BARRIER_ALL_SUBRESOURCES, + StateBefore: s0, + StateAfter: s1, + } }; self.temp.barriers.push(raw); } else if barrier.usage.start == crate::BufferUses::STORAGE_READ_WRITE { let mut raw = d3d12::D3D12_RESOURCE_BARRIER { Type: d3d12::D3D12_RESOURCE_BARRIER_TYPE_UAV, Flags: d3d12::D3D12_RESOURCE_BARRIER_FLAG_NONE, - u: mem::zeroed(), + u: unsafe { mem::zeroed() }, }; - *raw.u.UAV_mut() = d3d12::D3D12_RESOURCE_UAV_BARRIER { - pResource: barrier.buffer.resource.as_mut_ptr(), + unsafe { + *raw.u.UAV_mut() = d3d12::D3D12_RESOURCE_UAV_BARRIER { + pResource: barrier.buffer.resource.as_mut_ptr(), + } }; self.temp.barriers.push(raw); } } if !self.temp.barriers.is_empty() { - self.list - .unwrap() - 
.ResourceBarrier(self.temp.barriers.len() as u32, self.temp.barriers.as_ptr()); + unsafe { + self.list + .unwrap() + .ResourceBarrier(self.temp.barriers.len() as u32, self.temp.barriers.as_ptr()) + }; } } @@ -340,13 +348,15 @@ impl crate::CommandEncoder for super::CommandEncoder { let mut raw = d3d12::D3D12_RESOURCE_BARRIER { Type: d3d12::D3D12_RESOURCE_BARRIER_TYPE_TRANSITION, Flags: d3d12::D3D12_RESOURCE_BARRIER_FLAG_NONE, - u: mem::zeroed(), + u: unsafe { mem::zeroed() }, }; - *raw.u.Transition_mut() = d3d12::D3D12_RESOURCE_TRANSITION_BARRIER { - pResource: barrier.texture.resource.as_mut_ptr(), - Subresource: d3d12::D3D12_RESOURCE_BARRIER_ALL_SUBRESOURCES, - StateBefore: s0, - StateAfter: s1, + unsafe { + *raw.u.Transition_mut() = d3d12::D3D12_RESOURCE_TRANSITION_BARRIER { + pResource: barrier.texture.resource.as_mut_ptr(), + Subresource: d3d12::D3D12_RESOURCE_BARRIER_ALL_SUBRESOURCES, + StateBefore: s0, + StateAfter: s1, + } }; let mip_level_count = match barrier.range.mip_level_count { @@ -383,12 +393,14 @@ impl crate::CommandEncoder for super::CommandEncoder { for rel_mip_level in 0..mip_level_count { for rel_array_layer in 0..array_layer_count { for plane in planes.clone() { - raw.u.Transition_mut().Subresource = - barrier.texture.calc_subresource( - barrier.range.base_mip_level + rel_mip_level, - barrier.range.base_array_layer + rel_array_layer, - plane, - ); + unsafe { + raw.u.Transition_mut().Subresource = + barrier.texture.calc_subresource( + barrier.range.base_mip_level + rel_mip_level, + barrier.range.base_array_layer + rel_array_layer, + plane, + ); + }; self.temp.barriers.push(raw); } } @@ -398,19 +410,23 @@ impl crate::CommandEncoder for super::CommandEncoder { let mut raw = d3d12::D3D12_RESOURCE_BARRIER { Type: d3d12::D3D12_RESOURCE_BARRIER_TYPE_UAV, Flags: d3d12::D3D12_RESOURCE_BARRIER_FLAG_NONE, - u: mem::zeroed(), + u: unsafe { mem::zeroed() }, }; - *raw.u.UAV_mut() = d3d12::D3D12_RESOURCE_UAV_BARRIER { - pResource: 
barrier.texture.resource.as_mut_ptr(), + unsafe { + *raw.u.UAV_mut() = d3d12::D3D12_RESOURCE_UAV_BARRIER { + pResource: barrier.texture.resource.as_mut_ptr(), + } }; self.temp.barriers.push(raw); } } if !self.temp.barriers.is_empty() { - self.list - .unwrap() - .ResourceBarrier(self.temp.barriers.len() as u32, self.temp.barriers.as_ptr()); + unsafe { + self.list + .unwrap() + .ResourceBarrier(self.temp.barriers.len() as u32, self.temp.barriers.as_ptr()) + }; } } @@ -419,13 +435,15 @@ impl crate::CommandEncoder for super::CommandEncoder { let mut offset = range.start; while offset < range.end { let size = super::ZERO_BUFFER_SIZE.min(range.end - offset); - list.CopyBufferRegion( - buffer.resource.as_mut_ptr(), - offset, - self.shared.zero_buffer.as_mut_ptr(), - 0, - size, - ); + unsafe { + list.CopyBufferRegion( + buffer.resource.as_mut_ptr(), + offset, + self.shared.zero_buffer.as_mut_ptr(), + 0, + size, + ) + }; offset += size; } } @@ -440,13 +458,15 @@ impl crate::CommandEncoder for super::CommandEncoder { { let list = self.list.unwrap(); for r in regions { - list.CopyBufferRegion( - dst.resource.as_mut_ptr(), - r.dst_offset, - src.resource.as_mut_ptr(), - r.src_offset, - r.size.get(), - ); + unsafe { + list.CopyBufferRegion( + dst.resource.as_mut_ptr(), + r.dst_offset, + src.resource.as_mut_ptr(), + r.src_offset, + r.size.get(), + ) + }; } } @@ -463,27 +483,33 @@ impl crate::CommandEncoder for super::CommandEncoder { let mut src_location = d3d12::D3D12_TEXTURE_COPY_LOCATION { pResource: src.resource.as_mut_ptr(), Type: d3d12::D3D12_TEXTURE_COPY_TYPE_SUBRESOURCE_INDEX, - u: mem::zeroed(), + u: unsafe { mem::zeroed() }, }; let mut dst_location = d3d12::D3D12_TEXTURE_COPY_LOCATION { pResource: dst.resource.as_mut_ptr(), Type: d3d12::D3D12_TEXTURE_COPY_TYPE_SUBRESOURCE_INDEX, - u: mem::zeroed(), + u: unsafe { mem::zeroed() }, }; for r in regions { let src_box = make_box(&r.src_base.origin, &r.size); - *src_location.u.SubresourceIndex_mut() = 
src.calc_subresource_for_copy(&r.src_base); - *dst_location.u.SubresourceIndex_mut() = dst.calc_subresource_for_copy(&r.dst_base); - - list.CopyTextureRegion( - &dst_location, - r.dst_base.origin.x, - r.dst_base.origin.y, - r.dst_base.origin.z, - &src_location, - &src_box, - ); + unsafe { + *src_location.u.SubresourceIndex_mut() = src.calc_subresource_for_copy(&r.src_base) + }; + unsafe { + *dst_location.u.SubresourceIndex_mut() = dst.calc_subresource_for_copy(&r.dst_base) + }; + + unsafe { + list.CopyTextureRegion( + &dst_location, + r.dst_base.origin.x, + r.dst_base.origin.y, + r.dst_base.origin.z, + &src_location, + &src_box, + ) + }; } } @@ -499,25 +525,32 @@ impl crate::CommandEncoder for super::CommandEncoder { let mut src_location = d3d12::D3D12_TEXTURE_COPY_LOCATION { pResource: src.resource.as_mut_ptr(), Type: d3d12::D3D12_TEXTURE_COPY_TYPE_PLACED_FOOTPRINT, - u: mem::zeroed(), + u: unsafe { mem::zeroed() }, }; let mut dst_location = d3d12::D3D12_TEXTURE_COPY_LOCATION { pResource: dst.resource.as_mut_ptr(), Type: d3d12::D3D12_TEXTURE_COPY_TYPE_SUBRESOURCE_INDEX, - u: mem::zeroed(), + u: unsafe { mem::zeroed() }, }; for r in regions { let src_box = make_box(&wgt::Origin3d::ZERO, &r.size); - *src_location.u.PlacedFootprint_mut() = r.to_subresource_footprint(dst.format); - *dst_location.u.SubresourceIndex_mut() = dst.calc_subresource_for_copy(&r.texture_base); - list.CopyTextureRegion( - &dst_location, - r.texture_base.origin.x, - r.texture_base.origin.y, - r.texture_base.origin.z, - &src_location, - &src_box, - ); + unsafe { + *src_location.u.PlacedFootprint_mut() = r.to_subresource_footprint(dst.format) + }; + unsafe { + *dst_location.u.SubresourceIndex_mut() = + dst.calc_subresource_for_copy(&r.texture_base) + }; + unsafe { + list.CopyTextureRegion( + &dst_location, + r.texture_base.origin.x, + r.texture_base.origin.y, + r.texture_base.origin.z, + &src_location, + &src_box, + ) + }; } } @@ -534,37 +567,48 @@ impl crate::CommandEncoder for 
super::CommandEncoder { let mut src_location = d3d12::D3D12_TEXTURE_COPY_LOCATION { pResource: src.resource.as_mut_ptr(), Type: d3d12::D3D12_TEXTURE_COPY_TYPE_SUBRESOURCE_INDEX, - u: mem::zeroed(), + u: unsafe { mem::zeroed() }, }; let mut dst_location = d3d12::D3D12_TEXTURE_COPY_LOCATION { pResource: dst.resource.as_mut_ptr(), Type: d3d12::D3D12_TEXTURE_COPY_TYPE_PLACED_FOOTPRINT, - u: mem::zeroed(), + u: unsafe { mem::zeroed() }, }; for r in regions { let src_box = make_box(&r.texture_base.origin, &r.size); - *src_location.u.SubresourceIndex_mut() = src.calc_subresource_for_copy(&r.texture_base); - *dst_location.u.PlacedFootprint_mut() = r.to_subresource_footprint(src.format); - list.CopyTextureRegion(&dst_location, 0, 0, 0, &src_location, &src_box); + unsafe { + *src_location.u.SubresourceIndex_mut() = + src.calc_subresource_for_copy(&r.texture_base) + }; + unsafe { + *dst_location.u.PlacedFootprint_mut() = r.to_subresource_footprint(src.format) + }; + unsafe { list.CopyTextureRegion(&dst_location, 0, 0, 0, &src_location, &src_box) }; } } unsafe fn begin_query(&mut self, set: &super::QuerySet, index: u32) { - self.list - .unwrap() - .BeginQuery(set.raw.as_mut_ptr(), set.raw_ty, index); + unsafe { + self.list + .unwrap() + .BeginQuery(set.raw.as_mut_ptr(), set.raw_ty, index) + }; } unsafe fn end_query(&mut self, set: &super::QuerySet, index: u32) { - self.list - .unwrap() - .EndQuery(set.raw.as_mut_ptr(), set.raw_ty, index); + unsafe { + self.list + .unwrap() + .EndQuery(set.raw.as_mut_ptr(), set.raw_ty, index) + }; } unsafe fn write_timestamp(&mut self, set: &super::QuerySet, index: u32) { - self.list.unwrap().EndQuery( - set.raw.as_mut_ptr(), - d3d12::D3D12_QUERY_TYPE_TIMESTAMP, - index, - ); + unsafe { + self.list.unwrap().EndQuery( + set.raw.as_mut_ptr(), + d3d12::D3D12_QUERY_TYPE_TIMESTAMP, + index, + ) + }; } unsafe fn reset_queries(&mut self, _set: &super::QuerySet, _range: Range) { // nothing to do here @@ -577,20 +621,22 @@ impl crate::CommandEncoder for 
super::CommandEncoder { offset: wgt::BufferAddress, _stride: wgt::BufferSize, ) { - self.list.unwrap().ResolveQueryData( - set.raw.as_mut_ptr(), - set.raw_ty, - range.start, - range.end - range.start, - buffer.resource.as_mut_ptr(), - offset, - ); + unsafe { + self.list.unwrap().ResolveQueryData( + set.raw.as_mut_ptr(), + set.raw_ty, + range.start, + range.end - range.start, + buffer.resource.as_mut_ptr(), + offset, + ) + }; } // render unsafe fn begin_render_pass(&mut self, desc: &crate::RenderPassDescriptor) { - self.begin_pass(super::PassKind::Render, desc.label); + unsafe { self.begin_pass(super::PassKind::Render, desc.label) }; let mut color_views = [native::CpuDescriptor { ptr: 0 }; crate::MAX_COLOR_ATTACHMENTS]; for (rtv, cat) in color_views.iter_mut().zip(desc.color_attachments.iter()) { if let Some(cat) = cat.as_ref() { @@ -612,12 +658,14 @@ impl crate::CommandEncoder for super::CommandEncoder { }; let list = self.list.unwrap(); - list.OMSetRenderTargets( - desc.color_attachments.len() as u32, - color_views.as_ptr(), - 0, - ds_view, - ); + unsafe { + list.OMSetRenderTargets( + desc.color_attachments.len() as u32, + color_views.as_ptr(), + 0, + ds_view, + ) + }; self.pass.resolves.clear(); for (rtv, cat) in color_views.iter().zip(desc.color_attachments.iter()) { @@ -657,7 +705,7 @@ impl crate::CommandEncoder for super::CommandEncoder { if !ds_view.is_null() && !flags.is_empty() { list.clear_depth_stencil_view( - *ds_view, + unsafe { *ds_view }, flags, ds.clear_value.0, ds.clear_value.1 as u8, @@ -680,8 +728,8 @@ impl crate::CommandEncoder for super::CommandEncoder { right: desc.extent.width as i32, bottom: desc.extent.height as i32, }; - list.RSSetViewports(1, &raw_vp); - list.RSSetScissorRects(1, &raw_rect); + unsafe { list.RSSetViewports(1, &raw_vp) }; + unsafe { list.RSSetScissorRects(1, &raw_rect) }; } unsafe fn end_render_pass(&mut self) { @@ -695,54 +743,70 @@ impl crate::CommandEncoder for super::CommandEncoder { let mut barrier = 
d3d12::D3D12_RESOURCE_BARRIER { Type: d3d12::D3D12_RESOURCE_BARRIER_TYPE_TRANSITION, Flags: d3d12::D3D12_RESOURCE_BARRIER_FLAG_NONE, - u: mem::zeroed(), + u: unsafe { mem::zeroed() }, }; //Note: this assumes `D3D12_RESOURCE_STATE_RENDER_TARGET`. // If it's not the case, we can include the `TextureUses` in `PassResove`. - *barrier.u.Transition_mut() = d3d12::D3D12_RESOURCE_TRANSITION_BARRIER { - pResource: resolve.src.0.as_mut_ptr(), - Subresource: resolve.src.1, - StateBefore: d3d12::D3D12_RESOURCE_STATE_RENDER_TARGET, - StateAfter: d3d12::D3D12_RESOURCE_STATE_RESOLVE_SOURCE, + unsafe { + *barrier.u.Transition_mut() = d3d12::D3D12_RESOURCE_TRANSITION_BARRIER { + pResource: resolve.src.0.as_mut_ptr(), + Subresource: resolve.src.1, + StateBefore: d3d12::D3D12_RESOURCE_STATE_RENDER_TARGET, + StateAfter: d3d12::D3D12_RESOURCE_STATE_RESOLVE_SOURCE, + } }; self.temp.barriers.push(barrier); - *barrier.u.Transition_mut() = d3d12::D3D12_RESOURCE_TRANSITION_BARRIER { - pResource: resolve.dst.0.as_mut_ptr(), - Subresource: resolve.dst.1, - StateBefore: d3d12::D3D12_RESOURCE_STATE_RENDER_TARGET, - StateAfter: d3d12::D3D12_RESOURCE_STATE_RESOLVE_DEST, + unsafe { + *barrier.u.Transition_mut() = d3d12::D3D12_RESOURCE_TRANSITION_BARRIER { + pResource: resolve.dst.0.as_mut_ptr(), + Subresource: resolve.dst.1, + StateBefore: d3d12::D3D12_RESOURCE_STATE_RENDER_TARGET, + StateAfter: d3d12::D3D12_RESOURCE_STATE_RESOLVE_DEST, + } }; self.temp.barriers.push(barrier); } if !self.temp.barriers.is_empty() { profiling::scope!("ID3D12GraphicsCommandList::ResourceBarrier"); - list.ResourceBarrier(self.temp.barriers.len() as u32, self.temp.barriers.as_ptr()); + unsafe { + list.ResourceBarrier( + self.temp.barriers.len() as u32, + self.temp.barriers.as_ptr(), + ) + }; } for resolve in self.pass.resolves.iter() { profiling::scope!("ID3D12GraphicsCommandList::ResolveSubresource"); - list.ResolveSubresource( - resolve.dst.0.as_mut_ptr(), - resolve.dst.1, - resolve.src.0.as_mut_ptr(), - 
resolve.src.1, - resolve.format, - ); + unsafe { + list.ResolveSubresource( + resolve.dst.0.as_mut_ptr(), + resolve.dst.1, + resolve.src.0.as_mut_ptr(), + resolve.src.1, + resolve.format, + ) + }; } // Flip all the barriers to reverse, back into `COLOR_TARGET`. for barrier in self.temp.barriers.iter_mut() { - let transition = barrier.u.Transition_mut(); + let transition = unsafe { barrier.u.Transition_mut() }; mem::swap(&mut transition.StateBefore, &mut transition.StateAfter); } if !self.temp.barriers.is_empty() { profiling::scope!("ID3D12GraphicsCommandList::ResourceBarrier"); - list.ResourceBarrier(self.temp.barriers.len() as u32, self.temp.barriers.as_ptr()); + unsafe { + list.ResourceBarrier( + self.temp.barriers.len() as u32, + self.temp.barriers.as_ptr(), + ) + }; } } - self.end_pass(); + unsafe { self.end_pass() }; } unsafe fn set_bind_group( @@ -818,18 +882,22 @@ impl crate::CommandEncoder for super::CommandEncoder { unsafe fn insert_debug_marker(&mut self, label: &str) { let (wide_label, size) = self.temp.prepare_marker(label); - self.list - .unwrap() - .SetMarker(0, wide_label.as_ptr() as *const _, size); + unsafe { + self.list + .unwrap() + .SetMarker(0, wide_label.as_ptr() as *const _, size) + }; } unsafe fn begin_debug_marker(&mut self, group_label: &str) { let (wide_label, size) = self.temp.prepare_marker(group_label); - self.list - .unwrap() - .BeginEvent(0, wide_label.as_ptr() as *const _, size); + unsafe { + self.list + .unwrap() + .BeginEvent(0, wide_label.as_ptr() as *const _, size) + }; } unsafe fn end_debug_marker(&mut self) { - self.list.unwrap().EndEvent() + unsafe { self.list.unwrap().EndEvent() } } unsafe fn set_render_pipeline(&mut self, pipeline: &super::RenderPipeline) { @@ -842,7 +910,7 @@ impl crate::CommandEncoder for super::CommandEncoder { }; list.set_pipeline_state(pipeline.raw); - list.IASetPrimitiveTopology(pipeline.topology); + unsafe { list.IASetPrimitiveTopology(pipeline.topology) }; for (index, (vb, &stride)) in self .pass @@ 
-891,7 +959,7 @@ impl crate::CommandEncoder for super::CommandEncoder { MinDepth: depth_range.start, MaxDepth: depth_range.end, }; - self.list.unwrap().RSSetViewports(1, &raw_vp); + unsafe { self.list.unwrap().RSSetViewports(1, &raw_vp) }; } unsafe fn set_scissor_rect(&mut self, rect: &crate::Rect) { let raw_rect = d3d12::D3D12_RECT { @@ -900,7 +968,7 @@ impl crate::CommandEncoder for super::CommandEncoder { right: (rect.x + rect.w) as i32, bottom: (rect.y + rect.h) as i32, }; - self.list.unwrap().RSSetScissorRects(1, &raw_rect); + unsafe { self.list.unwrap().RSSetScissorRects(1, &raw_rect) }; } unsafe fn set_stencil_reference(&mut self, value: u32) { self.list.unwrap().set_stencil_reference(value); @@ -916,7 +984,7 @@ impl crate::CommandEncoder for super::CommandEncoder { start_instance: u32, instance_count: u32, ) { - self.prepare_draw(start_vertex as i32, start_instance); + unsafe { self.prepare_draw(start_vertex as i32, start_instance) }; self.list .unwrap() .draw(vertex_count, instance_count, start_vertex, start_instance); @@ -929,7 +997,7 @@ impl crate::CommandEncoder for super::CommandEncoder { start_instance: u32, instance_count: u32, ) { - self.prepare_draw(base_vertex, start_instance); + unsafe { self.prepare_draw(base_vertex, start_instance) }; self.list.unwrap().draw_indexed( index_count, instance_count, @@ -944,15 +1012,17 @@ impl crate::CommandEncoder for super::CommandEncoder { offset: wgt::BufferAddress, draw_count: u32, ) { - self.prepare_draw(0, 0); - self.list.unwrap().ExecuteIndirect( - self.shared.cmd_signatures.draw.as_mut_ptr(), - draw_count, - buffer.resource.as_mut_ptr(), - offset, - ptr::null_mut(), - 0, - ); + unsafe { self.prepare_draw(0, 0) }; + unsafe { + self.list.unwrap().ExecuteIndirect( + self.shared.cmd_signatures.draw.as_mut_ptr(), + draw_count, + buffer.resource.as_mut_ptr(), + offset, + ptr::null_mut(), + 0, + ) + }; } unsafe fn draw_indexed_indirect( &mut self, @@ -960,15 +1030,17 @@ impl crate::CommandEncoder for 
super::CommandEncoder { offset: wgt::BufferAddress, draw_count: u32, ) { - self.prepare_draw(0, 0); - self.list.unwrap().ExecuteIndirect( - self.shared.cmd_signatures.draw_indexed.as_mut_ptr(), - draw_count, - buffer.resource.as_mut_ptr(), - offset, - ptr::null_mut(), - 0, - ); + unsafe { self.prepare_draw(0, 0) }; + unsafe { + self.list.unwrap().ExecuteIndirect( + self.shared.cmd_signatures.draw_indexed.as_mut_ptr(), + draw_count, + buffer.resource.as_mut_ptr(), + offset, + ptr::null_mut(), + 0, + ) + }; } unsafe fn draw_indirect_count( &mut self, @@ -978,15 +1050,17 @@ impl crate::CommandEncoder for super::CommandEncoder { count_offset: wgt::BufferAddress, max_count: u32, ) { - self.prepare_draw(0, 0); - self.list.unwrap().ExecuteIndirect( - self.shared.cmd_signatures.draw.as_mut_ptr(), - max_count, - buffer.resource.as_mut_ptr(), - offset, - count_buffer.resource.as_mut_ptr(), - count_offset, - ); + unsafe { self.prepare_draw(0, 0) }; + unsafe { + self.list.unwrap().ExecuteIndirect( + self.shared.cmd_signatures.draw.as_mut_ptr(), + max_count, + buffer.resource.as_mut_ptr(), + offset, + count_buffer.resource.as_mut_ptr(), + count_offset, + ) + }; } unsafe fn draw_indexed_indirect_count( &mut self, @@ -996,24 +1070,26 @@ impl crate::CommandEncoder for super::CommandEncoder { count_offset: wgt::BufferAddress, max_count: u32, ) { - self.prepare_draw(0, 0); - self.list.unwrap().ExecuteIndirect( - self.shared.cmd_signatures.draw_indexed.as_mut_ptr(), - max_count, - buffer.resource.as_mut_ptr(), - offset, - count_buffer.resource.as_mut_ptr(), - count_offset, - ); + unsafe { self.prepare_draw(0, 0) }; + unsafe { + self.list.unwrap().ExecuteIndirect( + self.shared.cmd_signatures.draw_indexed.as_mut_ptr(), + max_count, + buffer.resource.as_mut_ptr(), + offset, + count_buffer.resource.as_mut_ptr(), + count_offset, + ) + }; } // compute unsafe fn begin_compute_pass(&mut self, desc: &crate::ComputePassDescriptor) { - self.begin_pass(super::PassKind::Compute, desc.label); + 
unsafe { self.begin_pass(super::PassKind::Compute, desc.label) }; } unsafe fn end_compute_pass(&mut self) { - self.end_pass(); + unsafe { self.end_pass() }; } unsafe fn set_compute_pipeline(&mut self, pipeline: &super::ComputePipeline) { @@ -1035,13 +1111,15 @@ impl crate::CommandEncoder for super::CommandEncoder { unsafe fn dispatch_indirect(&mut self, buffer: &super::Buffer, offset: wgt::BufferAddress) { self.prepare_dispatch([0; 3]); //TODO: update special constants indirectly - self.list.unwrap().ExecuteIndirect( - self.shared.cmd_signatures.dispatch.as_mut_ptr(), - 1, - buffer.resource.as_mut_ptr(), - offset, - ptr::null_mut(), - 0, - ); + unsafe { + self.list.unwrap().ExecuteIndirect( + self.shared.cmd_signatures.dispatch.as_mut_ptr(), + 1, + buffer.resource.as_mut_ptr(), + offset, + ptr::null_mut(), + 0, + ) + }; } } diff --git a/wgpu-hal/src/dx12/descriptor.rs b/wgpu-hal/src/dx12/descriptor.rs index 67c8eca4fe..46fdd3eecd 100644 --- a/wgpu-hal/src/dx12/descriptor.rs +++ b/wgpu-hal/src/dx12/descriptor.rs @@ -157,7 +157,7 @@ impl FixedSizeHeap { } unsafe fn destroy(&self) { - self.raw.destroy(); + unsafe { self.raw.destroy() }; } } @@ -225,7 +225,7 @@ impl CpuPool { pub(super) unsafe fn destroy(&self) { for heap in &self.heaps { - heap.destroy(); + unsafe { heap.destroy() }; } } } @@ -274,7 +274,7 @@ impl CpuHeap { } pub(super) unsafe fn destroy(self) { - self.inner.into_inner().raw.destroy(); + unsafe { self.inner.into_inner().raw.destroy() }; } } @@ -296,14 +296,16 @@ pub(super) unsafe fn upload( ) -> Result { let count = src.stage.len() as u32; let index = dst.allocate_slice(count as u64)?; - device.CopyDescriptors( - 1, - &dst.cpu_descriptor_at(index), - &count, - count, - src.stage.as_ptr(), - dummy_copy_counts.as_ptr(), - dst.ty as u32, - ); + unsafe { + device.CopyDescriptors( + 1, + &dst.cpu_descriptor_at(index), + &count, + count, + src.stage.as_ptr(), + dummy_copy_counts.as_ptr(), + dst.ty as u32, + ) + }; Ok(dst.at(index, count as u64)) } diff 
--git a/wgpu-hal/src/dx12/device.rs b/wgpu-hal/src/dx12/device.rs index 3809470ebe..7e24815d97 100644 --- a/wgpu-hal/src/dx12/device.rs +++ b/wgpu-hal/src/dx12/device.rs @@ -182,7 +182,7 @@ impl super::Device { .fence .set_event_on_completion(self.idler.event, value); hr.into_device_result("Set event")?; - synchapi::WaitForSingleObject(self.idler.event.0, winbase::INFINITE); + unsafe { synchapi::WaitForSingleObject(self.idler.event.0, winbase::INFINITE) }; Ok(()) } @@ -319,13 +319,13 @@ impl super::Device { impl crate::Device for super::Device { unsafe fn exit(self, queue: super::Queue) { self.rtv_pool.lock().free_handle(self.null_rtv_handle); - self.rtv_pool.into_inner().destroy(); - self.dsv_pool.into_inner().destroy(); - self.srv_uav_pool.into_inner().destroy(); - self.sampler_pool.into_inner().destroy(); - self.shared.destroy(); - self.idler.destroy(); - queue.raw.destroy(); + unsafe { self.rtv_pool.into_inner().destroy() }; + unsafe { self.dsv_pool.into_inner().destroy() }; + unsafe { self.srv_uav_pool.into_inner().destroy() }; + unsafe { self.sampler_pool.into_inner().destroy() }; + unsafe { self.shared.destroy() }; + unsafe { self.idler.destroy() }; + unsafe { queue.raw.destroy() }; } unsafe fn create_buffer( @@ -377,30 +377,32 @@ impl crate::Device for super::Device { VisibleNodeMask: 0, }; - let hr = self.raw.CreateCommittedResource( - &heap_properties, - if self.private_caps.heap_create_not_zeroed { - D3D12_HEAP_FLAG_CREATE_NOT_ZEROED - } else { - d3d12::D3D12_HEAP_FLAG_NONE - }, - &raw_desc, - d3d12::D3D12_RESOURCE_STATE_COMMON, - ptr::null(), - &d3d12::ID3D12Resource::uuidof(), - resource.mut_void(), - ); + let hr = unsafe { + self.raw.CreateCommittedResource( + &heap_properties, + if self.private_caps.heap_create_not_zeroed { + D3D12_HEAP_FLAG_CREATE_NOT_ZEROED + } else { + d3d12::D3D12_HEAP_FLAG_NONE + }, + &raw_desc, + d3d12::D3D12_RESOURCE_STATE_COMMON, + ptr::null(), + &d3d12::ID3D12Resource::uuidof(), + resource.mut_void(), + ) + }; 
hr.into_device_result("Buffer creation")?; if let Some(label) = desc.label { let cwstr = conv::map_label(label); - resource.SetName(cwstr.as_ptr()); + unsafe { resource.SetName(cwstr.as_ptr()) }; } Ok(super::Buffer { resource, size }) } unsafe fn destroy_buffer(&self, buffer: super::Buffer) { - buffer.resource.destroy(); + unsafe { buffer.resource.destroy() }; } unsafe fn map_buffer( &self, @@ -408,17 +410,17 @@ impl crate::Device for super::Device { range: crate::MemoryRange, ) -> Result { let mut ptr = ptr::null_mut(); - let hr = (*buffer.resource).Map(0, ptr::null(), &mut ptr); + let hr = unsafe { (*buffer.resource).Map(0, ptr::null(), &mut ptr) }; hr.into_device_result("Map buffer")?; Ok(crate::BufferMapping { - ptr: ptr::NonNull::new(ptr.offset(range.start as isize) as *mut _).unwrap(), + ptr: ptr::NonNull::new(unsafe { ptr.offset(range.start as isize) } as *mut _).unwrap(), //TODO: double-check this. Documentation is a bit misleading - // it implies that Map/Unmap is needed to invalidate/flush memory. 
is_coherent: true, }) } unsafe fn unmap_buffer(&self, buffer: &super::Buffer) -> Result<(), crate::DeviceError> { - (*buffer.resource).Unmap(0, ptr::null()); + unsafe { (*buffer.resource).Unmap(0, ptr::null()) }; Ok(()) } unsafe fn flush_mapped_ranges(&self, _buffer: &super::Buffer, _ranges: I) {} @@ -470,24 +472,26 @@ impl crate::Device for super::Device { VisibleNodeMask: 0, }; - let hr = self.raw.CreateCommittedResource( - &heap_properties, - if self.private_caps.heap_create_not_zeroed { - D3D12_HEAP_FLAG_CREATE_NOT_ZEROED - } else { - d3d12::D3D12_HEAP_FLAG_NONE - }, - &raw_desc, - d3d12::D3D12_RESOURCE_STATE_COMMON, - ptr::null(), // clear value - &d3d12::ID3D12Resource::uuidof(), - resource.mut_void(), - ); + let hr = unsafe { + self.raw.CreateCommittedResource( + &heap_properties, + if self.private_caps.heap_create_not_zeroed { + D3D12_HEAP_FLAG_CREATE_NOT_ZEROED + } else { + d3d12::D3D12_HEAP_FLAG_NONE + }, + &raw_desc, + d3d12::D3D12_RESOURCE_STATE_COMMON, + ptr::null(), // clear value + &d3d12::ID3D12Resource::uuidof(), + resource.mut_void(), + ) + }; hr.into_device_result("Texture creation")?; if let Some(label) = desc.label { let cwstr = conv::map_label(label); - resource.SetName(cwstr.as_ptr()); + unsafe { resource.SetName(cwstr.as_ptr()) }; } Ok(super::Texture { @@ -500,7 +504,7 @@ impl crate::Device for super::Device { }) } unsafe fn destroy_texture(&self, texture: super::Texture) { - texture.resource.destroy(); + unsafe { texture.resource.destroy() }; } unsafe fn create_texture_view( @@ -518,13 +522,15 @@ impl crate::Device for super::Device { texture.calc_subresource(desc.range.base_mip_level, desc.range.base_array_layer, 0), ), handle_srv: if desc.usage.intersects(crate::TextureUses::RESOURCE) { - let raw_desc = view_desc.to_srv(); + let raw_desc = unsafe { view_desc.to_srv() }; let handle = self.srv_uav_pool.lock().alloc_handle(); - self.raw.CreateShaderResourceView( - texture.resource.as_mut_ptr(), - &raw_desc, - handle.raw, - ); + unsafe { + 
self.raw.CreateShaderResourceView( + texture.resource.as_mut_ptr(), + &raw_desc, + handle.raw, + ) + }; Some(handle) } else { None @@ -532,26 +538,30 @@ impl crate::Device for super::Device { handle_uav: if desc.usage.intersects( crate::TextureUses::STORAGE_READ | crate::TextureUses::STORAGE_READ_WRITE, ) { - let raw_desc = view_desc.to_uav(); + let raw_desc = unsafe { view_desc.to_uav() }; let handle = self.srv_uav_pool.lock().alloc_handle(); - self.raw.CreateUnorderedAccessView( - texture.resource.as_mut_ptr(), - ptr::null_mut(), - &raw_desc, - handle.raw, - ); + unsafe { + self.raw.CreateUnorderedAccessView( + texture.resource.as_mut_ptr(), + ptr::null_mut(), + &raw_desc, + handle.raw, + ) + }; Some(handle) } else { None }, handle_rtv: if desc.usage.intersects(crate::TextureUses::COLOR_TARGET) { - let raw_desc = view_desc.to_rtv(); + let raw_desc = unsafe { view_desc.to_rtv() }; let handle = self.rtv_pool.lock().alloc_handle(); - self.raw.CreateRenderTargetView( - texture.resource.as_mut_ptr(), - &raw_desc, - handle.raw, - ); + unsafe { + self.raw.CreateRenderTargetView( + texture.resource.as_mut_ptr(), + &raw_desc, + handle.raw, + ) + }; Some(handle) } else { None @@ -560,13 +570,15 @@ impl crate::Device for super::Device { .usage .intersects(crate::TextureUses::DEPTH_STENCIL_READ) { - let raw_desc = view_desc.to_dsv(desc.format.into()); + let raw_desc = unsafe { view_desc.to_dsv(desc.format.into()) }; let handle = self.dsv_pool.lock().alloc_handle(); - self.raw.CreateDepthStencilView( - texture.resource.as_mut_ptr(), - &raw_desc, - handle.raw, - ); + unsafe { + self.raw.CreateDepthStencilView( + texture.resource.as_mut_ptr(), + &raw_desc, + handle.raw, + ) + }; Some(handle) } else { None @@ -575,13 +587,15 @@ impl crate::Device for super::Device { .usage .intersects(crate::TextureUses::DEPTH_STENCIL_WRITE) { - let raw_desc = view_desc.to_dsv(FormatAspects::empty()); + let raw_desc = unsafe { view_desc.to_dsv(FormatAspects::empty()) }; let handle = 
self.dsv_pool.lock().alloc_handle(); - self.raw.CreateDepthStencilView( - texture.resource.as_mut_ptr(), - &raw_desc, - handle.raw, - ); + unsafe { + self.raw.CreateDepthStencilView( + texture.resource.as_mut_ptr(), + &raw_desc, + handle.raw, + ) + }; Some(handle) } else { None @@ -664,7 +678,7 @@ impl crate::Device for super::Device { if let Some(label) = desc.label { let cwstr = conv::map_label(label); - allocator.SetName(cwstr.as_ptr()); + unsafe { allocator.SetName(cwstr.as_ptr()) }; } Ok(super::CommandEncoder { @@ -681,12 +695,12 @@ impl crate::Device for super::Device { unsafe fn destroy_command_encoder(&self, encoder: super::CommandEncoder) { if let Some(list) = encoder.list { list.close(); - list.destroy(); + unsafe { list.destroy() }; } for list in encoder.free_lists { - list.destroy(); + unsafe { list.destroy() }; } - encoder.allocator.destroy(); + unsafe { encoder.allocator.destroy() }; } unsafe fn create_bind_group_layout( @@ -737,10 +751,10 @@ impl crate::Device for super::Device { } unsafe fn destroy_bind_group_layout(&self, bg_layout: super::BindGroupLayout) { if let Some(cpu_heap) = bg_layout.cpu_heap_views { - cpu_heap.destroy(); + unsafe { cpu_heap.destroy() }; } if let Some(cpu_heap) = bg_layout.cpu_heap_samplers { - cpu_heap.destroy(); + unsafe { cpu_heap.destroy() }; } } @@ -1064,9 +1078,9 @@ impl crate::Device for super::Device { if !error.is_null() { log::error!( "Root signature serialization error: {:?}", - error.as_c_str().to_str().unwrap() + unsafe { error.as_c_str() }.to_str().unwrap() ); - error.destroy(); + unsafe { error.destroy() }; return Err(crate::DeviceError::Lost); } @@ -1074,13 +1088,13 @@ impl crate::Device for super::Device { .raw .create_root_signature(blob, 0) .into_device_result("Root signature creation")?; - blob.destroy(); + unsafe { blob.destroy() }; log::debug!("\traw = {:?}", raw); if let Some(label) = desc.label { let cwstr = conv::map_label(label); - raw.SetName(cwstr.as_ptr()); + unsafe { raw.SetName(cwstr.as_ptr()) 
}; } Ok(super::PipelineLayout { @@ -1101,7 +1115,7 @@ impl crate::Device for super::Device { }) } unsafe fn destroy_pipeline_layout(&self, pipeline_layout: super::PipelineLayout) { - pipeline_layout.shared.signature.destroy(); + unsafe { pipeline_layout.shared.signature.destroy() }; } unsafe fn create_bind_group( @@ -1155,7 +1169,7 @@ impl crate::Device for super::Device { BufferLocation: gpu_address, SizeInBytes: ((size - 1) | size_mask) + 1, }; - self.raw.CreateConstantBufferView(&raw_desc, handle); + unsafe { self.raw.CreateConstantBufferView(&raw_desc, handle) }; } wgt::BufferBindingType::Storage { read_only: true } => { let mut raw_desc = d3d12::D3D12_SHADER_RESOURCE_VIEW_DESC { @@ -1163,39 +1177,47 @@ impl crate::Device for super::Device { Shader4ComponentMapping: view::D3D12_DEFAULT_SHADER_4_COMPONENT_MAPPING, ViewDimension: d3d12::D3D12_SRV_DIMENSION_BUFFER, - u: mem::zeroed(), + u: unsafe { mem::zeroed() }, }; - *raw_desc.u.Buffer_mut() = d3d12::D3D12_BUFFER_SRV { - FirstElement: data.offset / 4, - NumElements: size / 4, - StructureByteStride: 0, - Flags: d3d12::D3D12_BUFFER_SRV_FLAG_RAW, + unsafe { + *raw_desc.u.Buffer_mut() = d3d12::D3D12_BUFFER_SRV { + FirstElement: data.offset / 4, + NumElements: size / 4, + StructureByteStride: 0, + Flags: d3d12::D3D12_BUFFER_SRV_FLAG_RAW, + } + }; + unsafe { + self.raw.CreateShaderResourceView( + data.buffer.resource.as_mut_ptr(), + &raw_desc, + handle, + ) }; - self.raw.CreateShaderResourceView( - data.buffer.resource.as_mut_ptr(), - &raw_desc, - handle, - ); } wgt::BufferBindingType::Storage { read_only: false } => { let mut raw_desc = d3d12::D3D12_UNORDERED_ACCESS_VIEW_DESC { Format: dxgiformat::DXGI_FORMAT_R32_TYPELESS, ViewDimension: d3d12::D3D12_UAV_DIMENSION_BUFFER, - u: mem::zeroed(), + u: unsafe { mem::zeroed() }, + }; + unsafe { + *raw_desc.u.Buffer_mut() = d3d12::D3D12_BUFFER_UAV { + FirstElement: data.offset / 4, + NumElements: size / 4, + StructureByteStride: 0, + CounterOffsetInBytes: 0, + Flags: 
d3d12::D3D12_BUFFER_UAV_FLAG_RAW, + } }; - *raw_desc.u.Buffer_mut() = d3d12::D3D12_BUFFER_UAV { - FirstElement: data.offset / 4, - NumElements: size / 4, - StructureByteStride: 0, - CounterOffsetInBytes: 0, - Flags: d3d12::D3D12_BUFFER_UAV_FLAG_RAW, + unsafe { + self.raw.CreateUnorderedAccessView( + data.buffer.resource.as_mut_ptr(), + ptr::null_mut(), + &raw_desc, + handle, + ) }; - self.raw.CreateUnorderedAccessView( - data.buffer.resource.as_mut_ptr(), - ptr::null_mut(), - &raw_desc, - handle, - ); } } inner.stage.push(handle); @@ -1229,24 +1251,28 @@ impl crate::Device for super::Device { let handle_views = match cpu_views { Some(inner) => { - let dual = descriptor::upload( - self.raw, - &inner, - &self.shared.heap_views, - &desc.layout.copy_counts, - )?; + let dual = unsafe { + descriptor::upload( + self.raw, + &inner, + &self.shared.heap_views, + &desc.layout.copy_counts, + ) + }?; Some(dual) } None => None, }; let handle_samplers = match cpu_samplers { Some(inner) => { - let dual = descriptor::upload( - self.raw, - &inner, - &self.shared.heap_samplers, - &desc.layout.copy_counts, - )?; + let dual = unsafe { + descriptor::upload( + self.raw, + &inner, + &self.shared.heap_samplers, + &desc.layout.copy_counts, + ) + }?; Some(dual) } None => None, @@ -1396,7 +1422,7 @@ impl crate::Device for super::Device { RasterizerState: raw_rasterizer, DepthStencilState: match desc.depth_stencil { Some(ref ds) => conv::map_depth_stencil(ds), - None => mem::zeroed(), + None => unsafe { mem::zeroed() }, }, InputLayout: d3d12::D3D12_INPUT_LAYOUT_DESC { pInputElementDescs: if input_element_descs.is_empty() { @@ -1437,16 +1463,18 @@ impl crate::Device for super::Device { let mut raw = native::PipelineState::null(); let hr = { profiling::scope!("ID3D12Device::CreateGraphicsPipelineState"); - self.raw.CreateGraphicsPipelineState( - &raw_desc, - &d3d12::ID3D12PipelineState::uuidof(), - raw.mut_void(), - ) + unsafe { + self.raw.CreateGraphicsPipelineState( + &raw_desc, + 
&d3d12::ID3D12PipelineState::uuidof(), + raw.mut_void(), + ) + } }; - blob_vs.destroy(); + unsafe { blob_vs.destroy() }; if !blob_fs.is_null() { - blob_fs.destroy(); + unsafe { blob_fs.destroy() }; } hr.into_result() @@ -1454,7 +1482,7 @@ impl crate::Device for super::Device { if let Some(name) = desc.label { let cwstr = conv::map_label(name); - raw.SetName(cwstr.as_ptr()); + unsafe { raw.SetName(cwstr.as_ptr()) }; } Ok(super::RenderPipeline { @@ -1465,7 +1493,7 @@ impl crate::Device for super::Device { }) } unsafe fn destroy_render_pipeline(&self, pipeline: super::RenderPipeline) { - pipeline.raw.destroy(); + unsafe { pipeline.raw.destroy() }; } unsafe fn create_compute_pipeline( @@ -1485,7 +1513,7 @@ impl crate::Device for super::Device { ) }; - blob_cs.destroy(); + unsafe { blob_cs.destroy() }; let raw = pair.into_result().map_err(|err| { crate::PipelineError::Linkage(wgt::ShaderStages::COMPUTE, err.into_owned()) @@ -1493,7 +1521,7 @@ impl crate::Device for super::Device { if let Some(name) = desc.label { let cwstr = conv::map_label(name); - raw.SetName(cwstr.as_ptr()); + unsafe { raw.SetName(cwstr.as_ptr()) }; } Ok(super::ComputePipeline { @@ -1502,7 +1530,7 @@ impl crate::Device for super::Device { }) } unsafe fn destroy_compute_pipeline(&self, pipeline: super::ComputePipeline) { - pipeline.raw.destroy(); + unsafe { pipeline.raw.destroy() }; } unsafe fn create_query_set( @@ -1531,34 +1559,36 @@ impl crate::Device for super::Device { if let Some(label) = desc.label { let cwstr = conv::map_label(label); - raw.SetName(cwstr.as_ptr()); + unsafe { raw.SetName(cwstr.as_ptr()) }; } Ok(super::QuerySet { raw, raw_ty }) } unsafe fn destroy_query_set(&self, set: super::QuerySet) { - set.raw.destroy(); + unsafe { set.raw.destroy() }; } unsafe fn create_fence(&self) -> Result { let mut raw = native::Fence::null(); - let hr = self.raw.CreateFence( - 0, - d3d12::D3D12_FENCE_FLAG_NONE, - &d3d12::ID3D12Fence::uuidof(), - raw.mut_void(), - ); + let hr = unsafe { + 
self.raw.CreateFence( + 0, + d3d12::D3D12_FENCE_FLAG_NONE, + &d3d12::ID3D12Fence::uuidof(), + raw.mut_void(), + ) + }; hr.into_device_result("Fence creation")?; Ok(super::Fence { raw }) } unsafe fn destroy_fence(&self, fence: super::Fence) { - fence.raw.destroy(); + unsafe { fence.raw.destroy() }; } unsafe fn get_fence_value( &self, fence: &super::Fence, ) -> Result { - Ok(fence.raw.GetCompletedValue()) + Ok(unsafe { fence.raw.GetCompletedValue() }) } unsafe fn wait( &self, @@ -1566,13 +1596,13 @@ impl crate::Device for super::Device { value: crate::FenceValue, timeout_ms: u32, ) -> Result { - if fence.raw.GetCompletedValue() >= value { + if unsafe { fence.raw.GetCompletedValue() } >= value { return Ok(true); } let hr = fence.raw.set_event_on_completion(self.idler.event, value); hr.into_device_result("Set event")?; - match synchapi::WaitForSingleObject(self.idler.event.0, timeout_ms) { + match unsafe { synchapi::WaitForSingleObject(self.idler.event.0, timeout_ms) } { winbase::WAIT_ABANDONED | winbase::WAIT_FAILED => Err(crate::DeviceError::Lost), winbase::WAIT_OBJECT_0 => Ok(true), winerror::WAIT_TIMEOUT => Ok(false), @@ -1586,8 +1616,10 @@ impl crate::Device for super::Device { unsafe fn start_capture(&self) -> bool { #[cfg(feature = "renderdoc")] { - self.render_doc - .start_frame_capture(self.raw.as_mut_ptr() as *mut _, ptr::null_mut()) + unsafe { + self.render_doc + .start_frame_capture(self.raw.as_mut_ptr() as *mut _, ptr::null_mut()) + } } #[cfg(not(feature = "renderdoc"))] false @@ -1595,7 +1627,9 @@ impl crate::Device for super::Device { unsafe fn stop_capture(&self) { #[cfg(feature = "renderdoc")] - self.render_doc - .end_frame_capture(self.raw.as_mut_ptr() as *mut _, ptr::null_mut()) + unsafe { + self.render_doc + .end_frame_capture(self.raw.as_mut_ptr() as *mut _, ptr::null_mut()) + } } } diff --git a/wgpu-hal/src/dx12/instance.rs b/wgpu-hal/src/dx12/instance.rs index b300c39857..71f53e140e 100644 --- a/wgpu-hal/src/dx12/instance.rs +++ 
b/wgpu-hal/src/dx12/instance.rs @@ -21,7 +21,7 @@ impl crate::Instance for super::Instance { Ok(pair) => match pair.into_result() { Ok(debug_controller) => { debug_controller.enable_layer(); - debug_controller.Release(); + unsafe { debug_controller.Release() }; } Err(err) => { log::warn!("Unable to enable D3D12 debug interface: {}", err); @@ -43,11 +43,13 @@ impl crate::Instance for super::Instance { #[allow(trivial_casts)] if let Some(factory5) = factory.as_factory5() { let mut allow_tearing: minwindef::BOOL = minwindef::FALSE; - let hr = factory5.CheckFeatureSupport( - dxgi1_5::DXGI_FEATURE_PRESENT_ALLOW_TEARING, - &mut allow_tearing as *mut _ as *mut _, - mem::size_of::() as _, - ); + let hr = unsafe { + factory5.CheckFeatureSupport( + dxgi1_5::DXGI_FEATURE_PRESENT_ALLOW_TEARING, + &mut allow_tearing as *mut _ as *mut _, + mem::size_of::() as _, + ) + }; match hr.into_result() { Err(err) => log::warn!("Unable to check for tearing support: {}", err), diff --git a/wgpu-hal/src/dx12/mod.rs b/wgpu-hal/src/dx12/mod.rs index 6f9f18b6ca..6fdd26dba7 100644 --- a/wgpu-hal/src/dx12/mod.rs +++ b/wgpu-hal/src/dx12/mod.rs @@ -100,7 +100,7 @@ impl Instance { ) -> Surface { Surface { factory: self.factory, - target: SurfaceTarget::Visual(native::WeakPtr::from_raw(visual)), + target: SurfaceTarget::Visual(unsafe { native::WeakPtr::from_raw(visual) }), supports_allow_tearing: self.supports_allow_tearing, swap_chain: None, } @@ -183,7 +183,7 @@ struct Idler { impl Idler { unsafe fn destroy(self) { - self.fence.destroy(); + unsafe { self.fence.destroy() }; } } @@ -195,9 +195,11 @@ struct CommandSignatures { impl CommandSignatures { unsafe fn destroy(&self) { - self.draw.destroy(); - self.draw_indexed.destroy(); - self.dispatch.destroy(); + unsafe { + self.draw.destroy(); + self.draw_indexed.destroy(); + self.dispatch.destroy(); + } } } @@ -210,10 +212,12 @@ struct DeviceShared { impl DeviceShared { unsafe fn destroy(&self) { - self.zero_buffer.destroy(); - 
self.cmd_signatures.destroy(); - self.heap_views.raw.destroy(); - self.heap_samplers.raw.destroy(); + unsafe { + self.zero_buffer.destroy(); + self.cmd_signatures.destroy(); + self.heap_views.raw.destroy(); + self.heap_samplers.raw.destroy(); + } } } @@ -548,7 +552,7 @@ unsafe impl Sync for ComputePipeline {} impl SwapChain { unsafe fn release_resources(self) -> native::WeakPtr { for resource in self.resources { - resource.destroy(); + unsafe { resource.destroy() }; } self.raw } @@ -561,7 +565,7 @@ impl SwapChain { Some(duration) => duration.as_millis() as u32, None => winbase::INFINITE, }; - match synchapi::WaitForSingleObject(self.waitable, timeout_ms) { + match unsafe { synchapi::WaitForSingleObject(self.waitable, timeout_ms) } { winbase::WAIT_ABANDONED | winbase::WAIT_FAILED => Err(crate::SurfaceError::Lost), winbase::WAIT_OBJECT_0 => Ok(true), winerror::WAIT_TIMEOUT => Ok(false), @@ -593,16 +597,18 @@ impl crate::Surface for Surface { //Note: this path doesn't properly re-initialize all of the things Some(sc) => { // can't have image resources in flight used by GPU - let _ = device.wait_idle(); - - let raw = sc.release_resources(); - let result = raw.ResizeBuffers( - config.swap_chain_size, - config.extent.width, - config.extent.height, - non_srgb_format, - flags, - ); + let _ = unsafe { device.wait_idle() }; + + let raw = unsafe { sc.release_resources() }; + let result = unsafe { + raw.ResizeBuffers( + config.swap_chain_size, + config.extent.width, + config.extent.height, + non_srgb_format, + flags, + ) + }; if let Err(err) = result.into_result() { log::error!("ResizeBuffers failed: {}", err); return Err(crate::SurfaceError::Other("window is in use")); @@ -664,7 +670,8 @@ impl crate::Surface for Surface { match self.target { SurfaceTarget::WndHandle(_) => {} SurfaceTarget::Visual(visual) => { - if let Err(err) = visual.SetContent(swap_chain1.as_unknown()).into_result() + if let Err(err) = + unsafe { visual.SetContent(swap_chain1.as_unknown()) }.into_result() 
{ log::error!("Unable to SetContent: {}", err); return Err(crate::SurfaceError::Other( @@ -674,9 +681,9 @@ impl crate::Surface for Surface { } } - match swap_chain1.cast::().into_result() { + match unsafe { swap_chain1.cast::() }.into_result() { Ok(swap_chain3) => { - swap_chain1.destroy(); + unsafe { swap_chain1.destroy() }; swap_chain3 } Err(err) => { @@ -692,20 +699,24 @@ impl crate::Surface for Surface { // Disable automatic Alt+Enter handling by DXGI. const DXGI_MWA_NO_WINDOW_CHANGES: u32 = 1; const DXGI_MWA_NO_ALT_ENTER: u32 = 2; - self.factory.MakeWindowAssociation( - wnd_handle, - DXGI_MWA_NO_WINDOW_CHANGES | DXGI_MWA_NO_ALT_ENTER, - ); + unsafe { + self.factory.MakeWindowAssociation( + wnd_handle, + DXGI_MWA_NO_WINDOW_CHANGES | DXGI_MWA_NO_ALT_ENTER, + ) + }; } SurfaceTarget::Visual(_) => {} } - swap_chain.SetMaximumFrameLatency(config.swap_chain_size); - let waitable = swap_chain.GetFrameLatencyWaitableObject(); + unsafe { swap_chain.SetMaximumFrameLatency(config.swap_chain_size) }; + let waitable = unsafe { swap_chain.GetFrameLatencyWaitableObject() }; let mut resources = vec![native::Resource::null(); config.swap_chain_size as usize]; for (i, res) in resources.iter_mut().enumerate() { - swap_chain.GetBuffer(i as _, &d3d12::ID3D12Resource::uuidof(), res.mut_void()); + unsafe { + swap_chain.GetBuffer(i as _, &d3d12::ID3D12Resource::uuidof(), res.mut_void()) + }; } self.swap_chain = Some(SwapChain { @@ -723,12 +734,14 @@ impl crate::Surface for Surface { unsafe fn unconfigure(&mut self, device: &Device) { if let Some(mut sc) = self.swap_chain.take() { - let _ = sc.wait(None); - //TODO: this shouldn't be needed, - // but it complains that the queue is still used otherwise - let _ = device.wait_idle(); - let raw = sc.release_resources(); - raw.destroy(); + unsafe { + let _ = sc.wait(None); + //TODO: this shouldn't be needed, + // but it complains that the queue is still used otherwise + let _ = device.wait_idle(); + let raw = sc.release_resources(); + 
raw.destroy(); + } } } @@ -738,9 +751,9 @@ impl crate::Surface for Surface { ) -> Result>, crate::SurfaceError> { let sc = self.swap_chain.as_mut().unwrap(); - sc.wait(timeout)?; + unsafe { sc.wait(timeout) }?; - let base_index = sc.raw.GetCurrentBackBufferIndex() as usize; + let base_index = unsafe { sc.raw.GetCurrentBackBufferIndex() } as usize; let index = (base_index + sc.acquired_count) % sc.resources.len(); sc.acquired_count += 1; @@ -803,14 +816,14 @@ impl crate::Queue for Queue { }; profiling::scope!("IDXGISwapchain3::Present"); - sc.raw.Present(interval, flags); + unsafe { sc.raw.Present(interval, flags) }; Ok(()) } unsafe fn get_timestamp_period(&self) -> f32 { let mut frequency = 0u64; - self.raw.GetTimestampFrequency(&mut frequency); + unsafe { self.raw.GetTimestampFrequency(&mut frequency) }; (1_000_000_000.0 / frequency as f64) as f32 } } diff --git a/wgpu-hal/src/dx12/view.rs b/wgpu-hal/src/dx12/view.rs index 81e187aaf1..39d9707312 100644 --- a/wgpu-hal/src/dx12/view.rs +++ b/wgpu-hal/src/dx12/view.rs @@ -42,16 +42,18 @@ impl ViewDescriptor { Format: self.format_nodepth, ViewDimension: 0, Shader4ComponentMapping: D3D12_DEFAULT_SHADER_4_COMPONENT_MAPPING, - u: mem::zeroed(), + u: unsafe { mem::zeroed() }, }; match self.dimension { wgt::TextureViewDimension::D1 => { desc.ViewDimension = d3d12::D3D12_SRV_DIMENSION_TEXTURE1D; - *desc.u.Texture1D_mut() = d3d12::D3D12_TEX1D_SRV { - MostDetailedMip: self.mip_level_base, - MipLevels: self.mip_level_count, - ResourceMinLODClamp: 0.0, + unsafe { + *desc.u.Texture1D_mut() = d3d12::D3D12_TEX1D_SRV { + MostDetailedMip: self.mip_level_base, + MipLevels: self.mip_level_count, + ResourceMinLODClamp: 0.0, + } } } /* @@ -67,67 +69,81 @@ impl ViewDescriptor { }*/ wgt::TextureViewDimension::D2 if self.multisampled && self.array_layer_base == 0 => { desc.ViewDimension = d3d12::D3D12_SRV_DIMENSION_TEXTURE2DMS; - *desc.u.Texture2DMS_mut() = d3d12::D3D12_TEX2DMS_SRV { - UnusedField_NothingToDefine: 0, + unsafe { + 
*desc.u.Texture2DMS_mut() = d3d12::D3D12_TEX2DMS_SRV { + UnusedField_NothingToDefine: 0, + } } } wgt::TextureViewDimension::D2 if self.array_layer_base == 0 => { desc.ViewDimension = d3d12::D3D12_SRV_DIMENSION_TEXTURE2D; - *desc.u.Texture2D_mut() = d3d12::D3D12_TEX2D_SRV { - MostDetailedMip: self.mip_level_base, - MipLevels: self.mip_level_count, - PlaneSlice: 0, - ResourceMinLODClamp: 0.0, + unsafe { + *desc.u.Texture2D_mut() = d3d12::D3D12_TEX2D_SRV { + MostDetailedMip: self.mip_level_base, + MipLevels: self.mip_level_count, + PlaneSlice: 0, + ResourceMinLODClamp: 0.0, + } } } wgt::TextureViewDimension::D2 | wgt::TextureViewDimension::D2Array if self.multisampled => { desc.ViewDimension = d3d12::D3D12_SRV_DIMENSION_TEXTURE2DMSARRAY; - *desc.u.Texture2DMSArray_mut() = d3d12::D3D12_TEX2DMS_ARRAY_SRV { - FirstArraySlice: self.array_layer_base, - ArraySize: self.array_layer_count, + unsafe { + *desc.u.Texture2DMSArray_mut() = d3d12::D3D12_TEX2DMS_ARRAY_SRV { + FirstArraySlice: self.array_layer_base, + ArraySize: self.array_layer_count, + } } } wgt::TextureViewDimension::D2 | wgt::TextureViewDimension::D2Array => { desc.ViewDimension = d3d12::D3D12_SRV_DIMENSION_TEXTURE2DARRAY; - *desc.u.Texture2DArray_mut() = d3d12::D3D12_TEX2D_ARRAY_SRV { - MostDetailedMip: self.mip_level_base, - MipLevels: self.mip_level_count, - FirstArraySlice: self.array_layer_base, - ArraySize: self.array_layer_count, - PlaneSlice: 0, - ResourceMinLODClamp: 0.0, + unsafe { + *desc.u.Texture2DArray_mut() = d3d12::D3D12_TEX2D_ARRAY_SRV { + MostDetailedMip: self.mip_level_base, + MipLevels: self.mip_level_count, + FirstArraySlice: self.array_layer_base, + ArraySize: self.array_layer_count, + PlaneSlice: 0, + ResourceMinLODClamp: 0.0, + } } } wgt::TextureViewDimension::D3 => { desc.ViewDimension = d3d12::D3D12_SRV_DIMENSION_TEXTURE3D; - *desc.u.Texture3D_mut() = d3d12::D3D12_TEX3D_SRV { - MostDetailedMip: self.mip_level_base, - MipLevels: self.mip_level_count, - ResourceMinLODClamp: 0.0, + unsafe { 
+ *desc.u.Texture3D_mut() = d3d12::D3D12_TEX3D_SRV { + MostDetailedMip: self.mip_level_base, + MipLevels: self.mip_level_count, + ResourceMinLODClamp: 0.0, + } } } wgt::TextureViewDimension::Cube if self.array_layer_base == 0 => { desc.ViewDimension = d3d12::D3D12_SRV_DIMENSION_TEXTURECUBE; - *desc.u.TextureCube_mut() = d3d12::D3D12_TEXCUBE_SRV { - MostDetailedMip: self.mip_level_base, - MipLevels: self.mip_level_count, - ResourceMinLODClamp: 0.0, + unsafe { + *desc.u.TextureCube_mut() = d3d12::D3D12_TEXCUBE_SRV { + MostDetailedMip: self.mip_level_base, + MipLevels: self.mip_level_count, + ResourceMinLODClamp: 0.0, + } } } wgt::TextureViewDimension::Cube | wgt::TextureViewDimension::CubeArray => { desc.ViewDimension = d3d12::D3D12_SRV_DIMENSION_TEXTURECUBEARRAY; - *desc.u.TextureCubeArray_mut() = d3d12::D3D12_TEXCUBE_ARRAY_SRV { - MostDetailedMip: self.mip_level_base, - MipLevels: self.mip_level_count, - First2DArrayFace: self.array_layer_base, - NumCubes: if self.array_layer_count == !0 { - !0 - } else { - self.array_layer_count / 6 - }, - ResourceMinLODClamp: 0.0, + unsafe { + *desc.u.TextureCubeArray_mut() = d3d12::D3D12_TEXCUBE_ARRAY_SRV { + MostDetailedMip: self.mip_level_base, + MipLevels: self.mip_level_count, + First2DArrayFace: self.array_layer_base, + NumCubes: if self.array_layer_count == !0 { + !0 + } else { + self.array_layer_count / 6 + }, + ResourceMinLODClamp: 0.0, + } } } } @@ -139,14 +155,16 @@ impl ViewDescriptor { let mut desc = d3d12::D3D12_UNORDERED_ACCESS_VIEW_DESC { Format: self.format_nodepth, ViewDimension: 0, - u: mem::zeroed(), + u: unsafe { mem::zeroed() }, }; match self.dimension { wgt::TextureViewDimension::D1 => { desc.ViewDimension = d3d12::D3D12_UAV_DIMENSION_TEXTURE1D; - *desc.u.Texture1D_mut() = d3d12::D3D12_TEX1D_UAV { - MipSlice: self.mip_level_base, + unsafe { + *desc.u.Texture1D_mut() = d3d12::D3D12_TEX1D_UAV { + MipSlice: self.mip_level_base, + } } } /* @@ -160,26 +178,32 @@ impl ViewDescriptor { }*/ 
wgt::TextureViewDimension::D2 if self.array_layer_base == 0 => { desc.ViewDimension = d3d12::D3D12_UAV_DIMENSION_TEXTURE2D; - *desc.u.Texture2D_mut() = d3d12::D3D12_TEX2D_UAV { - MipSlice: self.mip_level_base, - PlaneSlice: 0, + unsafe { + *desc.u.Texture2D_mut() = d3d12::D3D12_TEX2D_UAV { + MipSlice: self.mip_level_base, + PlaneSlice: 0, + } } } wgt::TextureViewDimension::D2 | wgt::TextureViewDimension::D2Array => { desc.ViewDimension = d3d12::D3D12_UAV_DIMENSION_TEXTURE2DARRAY; - *desc.u.Texture2DArray_mut() = d3d12::D3D12_TEX2D_ARRAY_UAV { - MipSlice: self.mip_level_base, - FirstArraySlice: self.array_layer_base, - ArraySize: self.array_layer_count, - PlaneSlice: 0, + unsafe { + *desc.u.Texture2DArray_mut() = d3d12::D3D12_TEX2D_ARRAY_UAV { + MipSlice: self.mip_level_base, + FirstArraySlice: self.array_layer_base, + ArraySize: self.array_layer_count, + PlaneSlice: 0, + } } } wgt::TextureViewDimension::D3 => { desc.ViewDimension = d3d12::D3D12_UAV_DIMENSION_TEXTURE3D; - *desc.u.Texture3D_mut() = d3d12::D3D12_TEX3D_UAV { - MipSlice: self.mip_level_base, - FirstWSlice: self.array_layer_base, - WSize: self.array_layer_count, + unsafe { + *desc.u.Texture3D_mut() = d3d12::D3D12_TEX3D_UAV { + MipSlice: self.mip_level_base, + FirstWSlice: self.array_layer_base, + WSize: self.array_layer_count, + } } } wgt::TextureViewDimension::Cube | wgt::TextureViewDimension::CubeArray => { @@ -194,14 +218,16 @@ impl ViewDescriptor { let mut desc = d3d12::D3D12_RENDER_TARGET_VIEW_DESC { Format: self.format, ViewDimension: 0, - u: mem::zeroed(), + u: unsafe { mem::zeroed() }, }; match self.dimension { wgt::TextureViewDimension::D1 => { desc.ViewDimension = d3d12::D3D12_RTV_DIMENSION_TEXTURE1D; - *desc.u.Texture1D_mut() = d3d12::D3D12_TEX1D_RTV { - MipSlice: self.mip_level_base, + unsafe { + *desc.u.Texture1D_mut() = d3d12::D3D12_TEX1D_RTV { + MipSlice: self.mip_level_base, + } } } /* @@ -215,41 +241,51 @@ impl ViewDescriptor { }*/ wgt::TextureViewDimension::D2 if self.multisampled && 
self.array_layer_base == 0 => { desc.ViewDimension = d3d12::D3D12_RTV_DIMENSION_TEXTURE2DMS; - *desc.u.Texture2DMS_mut() = d3d12::D3D12_TEX2DMS_RTV { - UnusedField_NothingToDefine: 0, + unsafe { + *desc.u.Texture2DMS_mut() = d3d12::D3D12_TEX2DMS_RTV { + UnusedField_NothingToDefine: 0, + } } } wgt::TextureViewDimension::D2 if self.array_layer_base == 0 => { desc.ViewDimension = d3d12::D3D12_RTV_DIMENSION_TEXTURE2D; - *desc.u.Texture2D_mut() = d3d12::D3D12_TEX2D_RTV { - MipSlice: self.mip_level_base, - PlaneSlice: 0, + unsafe { + *desc.u.Texture2D_mut() = d3d12::D3D12_TEX2D_RTV { + MipSlice: self.mip_level_base, + PlaneSlice: 0, + } } } wgt::TextureViewDimension::D2 | wgt::TextureViewDimension::D2Array if self.multisampled => { desc.ViewDimension = d3d12::D3D12_RTV_DIMENSION_TEXTURE2DMSARRAY; - *desc.u.Texture2DMSArray_mut() = d3d12::D3D12_TEX2DMS_ARRAY_RTV { - FirstArraySlice: self.array_layer_base, - ArraySize: self.array_layer_count, + unsafe { + *desc.u.Texture2DMSArray_mut() = d3d12::D3D12_TEX2DMS_ARRAY_RTV { + FirstArraySlice: self.array_layer_base, + ArraySize: self.array_layer_count, + } } } wgt::TextureViewDimension::D2 | wgt::TextureViewDimension::D2Array => { desc.ViewDimension = d3d12::D3D12_RTV_DIMENSION_TEXTURE2DARRAY; - *desc.u.Texture2DArray_mut() = d3d12::D3D12_TEX2D_ARRAY_RTV { - MipSlice: self.mip_level_base, - FirstArraySlice: self.array_layer_base, - ArraySize: self.array_layer_count, - PlaneSlice: 0, + unsafe { + *desc.u.Texture2DArray_mut() = d3d12::D3D12_TEX2D_ARRAY_RTV { + MipSlice: self.mip_level_base, + FirstArraySlice: self.array_layer_base, + ArraySize: self.array_layer_count, + PlaneSlice: 0, + } } } wgt::TextureViewDimension::D3 => { desc.ViewDimension = d3d12::D3D12_RTV_DIMENSION_TEXTURE3D; - *desc.u.Texture3D_mut() = d3d12::D3D12_TEX3D_RTV { - MipSlice: self.mip_level_base, - FirstWSlice: self.array_layer_base, - WSize: self.array_layer_count, + unsafe { + *desc.u.Texture3D_mut() = d3d12::D3D12_TEX3D_RTV { + MipSlice: 
self.mip_level_base, + FirstWSlice: self.array_layer_base, + WSize: self.array_layer_count, + } } } wgt::TextureViewDimension::Cube | wgt::TextureViewDimension::CubeArray => { @@ -277,14 +313,16 @@ impl ViewDescriptor { } flags }, - u: mem::zeroed(), + u: unsafe { mem::zeroed() }, }; match self.dimension { wgt::TextureViewDimension::D1 => { desc.ViewDimension = d3d12::D3D12_DSV_DIMENSION_TEXTURE1D; - *desc.u.Texture1D_mut() = d3d12::D3D12_TEX1D_DSV { - MipSlice: self.mip_level_base, + unsafe { + *desc.u.Texture1D_mut() = d3d12::D3D12_TEX1D_DSV { + MipSlice: self.mip_level_base, + } } } /* @@ -298,31 +336,39 @@ impl ViewDescriptor { }*/ wgt::TextureViewDimension::D2 if self.multisampled && self.array_layer_base == 0 => { desc.ViewDimension = d3d12::D3D12_DSV_DIMENSION_TEXTURE2DMS; - *desc.u.Texture2DMS_mut() = d3d12::D3D12_TEX2DMS_DSV { - UnusedField_NothingToDefine: 0, + unsafe { + *desc.u.Texture2DMS_mut() = d3d12::D3D12_TEX2DMS_DSV { + UnusedField_NothingToDefine: 0, + } } } wgt::TextureViewDimension::D2 if self.array_layer_base == 0 => { desc.ViewDimension = d3d12::D3D12_DSV_DIMENSION_TEXTURE2D; - *desc.u.Texture2D_mut() = d3d12::D3D12_TEX2D_DSV { - MipSlice: self.mip_level_base, + unsafe { + *desc.u.Texture2D_mut() = d3d12::D3D12_TEX2D_DSV { + MipSlice: self.mip_level_base, + } } } wgt::TextureViewDimension::D2 | wgt::TextureViewDimension::D2Array if self.multisampled => { desc.ViewDimension = d3d12::D3D12_DSV_DIMENSION_TEXTURE2DMSARRAY; - *desc.u.Texture2DMSArray_mut() = d3d12::D3D12_TEX2DMS_ARRAY_DSV { - FirstArraySlice: self.array_layer_base, - ArraySize: self.array_layer_count, + unsafe { + *desc.u.Texture2DMSArray_mut() = d3d12::D3D12_TEX2DMS_ARRAY_DSV { + FirstArraySlice: self.array_layer_base, + ArraySize: self.array_layer_count, + } } } wgt::TextureViewDimension::D2 | wgt::TextureViewDimension::D2Array => { desc.ViewDimension = d3d12::D3D12_DSV_DIMENSION_TEXTURE2DARRAY; - *desc.u.Texture2DArray_mut() = d3d12::D3D12_TEX2D_ARRAY_DSV { - MipSlice: 
self.mip_level_base, - FirstArraySlice: self.array_layer_base, - ArraySize: self.array_layer_count, + unsafe { + *desc.u.Texture2DArray_mut() = d3d12::D3D12_TEX2D_ARRAY_DSV { + MipSlice: self.mip_level_base, + FirstArraySlice: self.array_layer_base, + ArraySize: self.array_layer_count, + } } } wgt::TextureViewDimension::D3 diff --git a/wgpu-hal/src/gles/adapter.rs b/wgpu-hal/src/gles/adapter.rs index 773aae98ac..26bcc90fc7 100644 --- a/wgpu-hal/src/gles/adapter.rs +++ b/wgpu-hal/src/gles/adapter.rs @@ -191,12 +191,12 @@ impl super::Adapter { (glow::VENDOR, glow::RENDERER) }; let (vendor, renderer) = { - let vendor = gl.get_parameter_string(vendor_const); - let renderer = gl.get_parameter_string(renderer_const); + let vendor = unsafe { gl.get_parameter_string(vendor_const) }; + let renderer = unsafe { gl.get_parameter_string(renderer_const) }; (vendor, renderer) }; - let version = gl.get_parameter_string(glow::VERSION); + let version = unsafe { gl.get_parameter_string(glow::VERSION) }; log::info!("Vendor: {}", vendor); log::info!("Renderer: {}", renderer); log::info!("Version: {}", version); @@ -217,7 +217,7 @@ impl super::Adapter { let supports_work_group_params = ver >= (3, 1); let shading_language_version = { - let sl_version = gl.get_parameter_string(glow::SHADING_LANGUAGE_VERSION); + let sl_version = unsafe { gl.get_parameter_string(glow::SHADING_LANGUAGE_VERSION) }; log::info!("SL version: {}", &sl_version); let (sl_major, sl_minor) = Self::parse_version(&sl_version).ok()?; let value = sl_major as u16 * 100 + sl_minor as u16 * 10; @@ -231,27 +231,27 @@ impl super::Adapter { let is_angle = renderer.contains("ANGLE"); let vertex_shader_storage_blocks = if supports_storage { - gl.get_parameter_i32(glow::MAX_VERTEX_SHADER_STORAGE_BLOCKS) as u32 + (unsafe { gl.get_parameter_i32(glow::MAX_VERTEX_SHADER_STORAGE_BLOCKS) } as u32) } else { 0 }; let fragment_shader_storage_blocks = if supports_storage { - gl.get_parameter_i32(glow::MAX_FRAGMENT_SHADER_STORAGE_BLOCKS) as 
u32 + (unsafe { gl.get_parameter_i32(glow::MAX_FRAGMENT_SHADER_STORAGE_BLOCKS) } as u32) } else { 0 }; let vertex_shader_storage_textures = if supports_storage { - gl.get_parameter_i32(glow::MAX_VERTEX_IMAGE_UNIFORMS) as u32 + (unsafe { gl.get_parameter_i32(glow::MAX_VERTEX_IMAGE_UNIFORMS) } as u32) } else { 0 }; let fragment_shader_storage_textures = if supports_storage { - gl.get_parameter_i32(glow::MAX_FRAGMENT_IMAGE_UNIFORMS) as u32 + (unsafe { gl.get_parameter_i32(glow::MAX_FRAGMENT_IMAGE_UNIFORMS) } as u32) } else { 0 }; let max_storage_block_size = if supports_storage { - gl.get_parameter_i32(glow::MAX_SHADER_STORAGE_BLOCK_SIZE) as u32 + (unsafe { gl.get_parameter_i32(glow::MAX_SHADER_STORAGE_BLOCK_SIZE) } as u32) } else { 0 }; @@ -422,24 +422,25 @@ impl super::Adapter { color_buffer_float, ); - let max_texture_size = gl.get_parameter_i32(glow::MAX_TEXTURE_SIZE) as u32; - let max_texture_3d_size = gl.get_parameter_i32(glow::MAX_3D_TEXTURE_SIZE) as u32; + let max_texture_size = unsafe { gl.get_parameter_i32(glow::MAX_TEXTURE_SIZE) } as u32; + let max_texture_3d_size = unsafe { gl.get_parameter_i32(glow::MAX_3D_TEXTURE_SIZE) } as u32; let min_uniform_buffer_offset_alignment = - gl.get_parameter_i32(glow::UNIFORM_BUFFER_OFFSET_ALIGNMENT) as u32; + (unsafe { gl.get_parameter_i32(glow::UNIFORM_BUFFER_OFFSET_ALIGNMENT) } as u32); let min_storage_buffer_offset_alignment = if ver >= (3, 1) { - gl.get_parameter_i32(glow::SHADER_STORAGE_BUFFER_OFFSET_ALIGNMENT) as u32 + (unsafe { gl.get_parameter_i32(glow::SHADER_STORAGE_BUFFER_OFFSET_ALIGNMENT) } as u32) } else { 256 }; let max_uniform_buffers_per_shader_stage = - gl.get_parameter_i32(glow::MAX_VERTEX_UNIFORM_BLOCKS) - .min(gl.get_parameter_i32(glow::MAX_FRAGMENT_UNIFORM_BLOCKS)) as u32; + unsafe { gl.get_parameter_i32(glow::MAX_VERTEX_UNIFORM_BLOCKS) } + .min(unsafe { gl.get_parameter_i32(glow::MAX_FRAGMENT_UNIFORM_BLOCKS) }) + as u32; let max_compute_workgroups_per_dimension = if supports_work_group_params { - 
gl.get_parameter_indexed_i32(glow::MAX_COMPUTE_WORK_GROUP_COUNT, 0) - .min(gl.get_parameter_indexed_i32(glow::MAX_COMPUTE_WORK_GROUP_COUNT, 1)) - .min(gl.get_parameter_indexed_i32(glow::MAX_COMPUTE_WORK_GROUP_COUNT, 2)) + unsafe { gl.get_parameter_indexed_i32(glow::MAX_COMPUTE_WORK_GROUP_COUNT, 0) } + .min(unsafe { gl.get_parameter_indexed_i32(glow::MAX_COMPUTE_WORK_GROUP_COUNT, 1) }) + .min(unsafe { gl.get_parameter_indexed_i32(glow::MAX_COMPUTE_WORK_GROUP_COUNT, 2) }) as u32 } else { 0 @@ -449,7 +450,9 @@ impl super::Adapter { max_texture_dimension_1d: max_texture_size, max_texture_dimension_2d: max_texture_size, max_texture_dimension_3d: max_texture_3d_size, - max_texture_array_layers: gl.get_parameter_i32(glow::MAX_ARRAY_TEXTURE_LAYERS) as u32, + max_texture_array_layers: unsafe { + gl.get_parameter_i32(glow::MAX_ARRAY_TEXTURE_LAYERS) + } as u32, max_bind_groups: crate::MAX_BIND_GROUPS as u32, max_bindings_per_bind_group: 65535, max_dynamic_uniform_buffers_per_pipeline_layout: max_uniform_buffers_per_shader_stage, @@ -459,56 +462,62 @@ impl super::Adapter { max_storage_buffers_per_shader_stage, max_storage_textures_per_shader_stage, max_uniform_buffers_per_shader_stage, - max_uniform_buffer_binding_size: gl.get_parameter_i32(glow::MAX_UNIFORM_BLOCK_SIZE) - as u32, + max_uniform_buffer_binding_size: unsafe { + gl.get_parameter_i32(glow::MAX_UNIFORM_BLOCK_SIZE) + } as u32, max_storage_buffer_binding_size: if ver >= (3, 1) { - gl.get_parameter_i32(glow::MAX_SHADER_STORAGE_BLOCK_SIZE) + unsafe { gl.get_parameter_i32(glow::MAX_SHADER_STORAGE_BLOCK_SIZE) } } else { 0 } as u32, max_vertex_buffers: if private_caps .contains(super::PrivateCapabilities::VERTEX_BUFFER_LAYOUT) { - gl.get_parameter_i32(glow::MAX_VERTEX_ATTRIB_BINDINGS) as u32 + (unsafe { gl.get_parameter_i32(glow::MAX_VERTEX_ATTRIB_BINDINGS) } as u32) } else { 16 // should this be different? 
}, - max_vertex_attributes: (gl.get_parameter_i32(glow::MAX_VERTEX_ATTRIBS) as u32) + max_vertex_attributes: (unsafe { gl.get_parameter_i32(glow::MAX_VERTEX_ATTRIBS) } + as u32) .min(super::MAX_VERTEX_ATTRIBUTES as u32), max_vertex_buffer_array_stride: if private_caps .contains(super::PrivateCapabilities::VERTEX_BUFFER_LAYOUT) { - gl.get_parameter_i32(glow::MAX_VERTEX_ATTRIB_STRIDE) as u32 + (unsafe { gl.get_parameter_i32(glow::MAX_VERTEX_ATTRIB_STRIDE) } as u32) } else { !0 }, max_push_constant_size: super::MAX_PUSH_CONSTANTS as u32 * 4, min_uniform_buffer_offset_alignment, min_storage_buffer_offset_alignment, - max_inter_stage_shader_components: gl.get_parameter_i32(glow::MAX_VARYING_COMPONENTS) - as u32, + max_inter_stage_shader_components: unsafe { + gl.get_parameter_i32(glow::MAX_VARYING_COMPONENTS) + } as u32, max_compute_workgroup_storage_size: if supports_work_group_params { - gl.get_parameter_i32(glow::MAX_COMPUTE_SHARED_MEMORY_SIZE) as u32 + (unsafe { gl.get_parameter_i32(glow::MAX_COMPUTE_SHARED_MEMORY_SIZE) } as u32) } else { 0 }, max_compute_invocations_per_workgroup: if supports_work_group_params { - gl.get_parameter_i32(glow::MAX_COMPUTE_WORK_GROUP_INVOCATIONS) as u32 + (unsafe { gl.get_parameter_i32(glow::MAX_COMPUTE_WORK_GROUP_INVOCATIONS) } as u32) } else { 0 }, max_compute_workgroup_size_x: if supports_work_group_params { - gl.get_parameter_indexed_i32(glow::MAX_COMPUTE_WORK_GROUP_SIZE, 0) as u32 + (unsafe { gl.get_parameter_indexed_i32(glow::MAX_COMPUTE_WORK_GROUP_SIZE, 0) } + as u32) } else { 0 }, max_compute_workgroup_size_y: if supports_work_group_params { - gl.get_parameter_indexed_i32(glow::MAX_COMPUTE_WORK_GROUP_SIZE, 1) as u32 + (unsafe { gl.get_parameter_indexed_i32(glow::MAX_COMPUTE_WORK_GROUP_SIZE, 1) } + as u32) } else { 0 }, max_compute_workgroup_size_z: if supports_work_group_params { - gl.get_parameter_indexed_i32(glow::MAX_COMPUTE_WORK_GROUP_SIZE, 2) as u32 + (unsafe { 
gl.get_parameter_indexed_i32(glow::MAX_COMPUTE_WORK_GROUP_SIZE, 2) } + as u32) } else { 0 }, @@ -578,27 +587,22 @@ impl super::Adapter { unsafe fn create_shader_clear_program( gl: &glow::Context, ) -> (glow::Program, glow::UniformLocation) { - let program = gl - .create_program() - .expect("Could not create shader program"); - let vertex = gl - .create_shader(glow::VERTEX_SHADER) - .expect("Could not create shader"); - gl.shader_source(vertex, include_str!("./shaders/clear.vert")); - gl.compile_shader(vertex); - let fragment = gl - .create_shader(glow::FRAGMENT_SHADER) - .expect("Could not create shader"); - gl.shader_source(fragment, include_str!("./shaders/clear.frag")); - gl.compile_shader(fragment); - gl.attach_shader(program, vertex); - gl.attach_shader(program, fragment); - gl.link_program(program); - let color_uniform_location = gl - .get_uniform_location(program, "color") + let program = unsafe { gl.create_program() }.expect("Could not create shader program"); + let vertex = + unsafe { gl.create_shader(glow::VERTEX_SHADER) }.expect("Could not create shader"); + unsafe { gl.shader_source(vertex, include_str!("./shaders/clear.vert")) }; + unsafe { gl.compile_shader(vertex) }; + let fragment = + unsafe { gl.create_shader(glow::FRAGMENT_SHADER) }.expect("Could not create shader"); + unsafe { gl.shader_source(fragment, include_str!("./shaders/clear.frag")) }; + unsafe { gl.compile_shader(fragment) }; + unsafe { gl.attach_shader(program, vertex) }; + unsafe { gl.attach_shader(program, fragment) }; + unsafe { gl.link_program(program) }; + let color_uniform_location = unsafe { gl.get_uniform_location(program, "color") } .expect("Could not find color uniform in shader clear shader"); - gl.delete_shader(vertex); - gl.delete_shader(fragment); + unsafe { gl.delete_shader(vertex) }; + unsafe { gl.delete_shader(fragment) }; (program, color_uniform_location) } @@ -611,24 +615,22 @@ impl crate::Adapter for super::Adapter { _limits: &wgt::Limits, ) -> Result, 
crate::DeviceError> { let gl = &self.shared.context.lock(); - gl.pixel_store_i32(glow::UNPACK_ALIGNMENT, 1); - gl.pixel_store_i32(glow::PACK_ALIGNMENT, 1); - let main_vao = gl - .create_vertex_array() - .map_err(|_| crate::DeviceError::OutOfMemory)?; - gl.bind_vertex_array(Some(main_vao)); - - let zero_buffer = gl - .create_buffer() - .map_err(|_| crate::DeviceError::OutOfMemory)?; - gl.bind_buffer(glow::COPY_READ_BUFFER, Some(zero_buffer)); + unsafe { gl.pixel_store_i32(glow::UNPACK_ALIGNMENT, 1) }; + unsafe { gl.pixel_store_i32(glow::PACK_ALIGNMENT, 1) }; + let main_vao = + unsafe { gl.create_vertex_array() }.map_err(|_| crate::DeviceError::OutOfMemory)?; + unsafe { gl.bind_vertex_array(Some(main_vao)) }; + + let zero_buffer = + unsafe { gl.create_buffer() }.map_err(|_| crate::DeviceError::OutOfMemory)?; + unsafe { gl.bind_buffer(glow::COPY_READ_BUFFER, Some(zero_buffer)) }; let zeroes = vec![0u8; super::ZERO_BUFFER_SIZE]; - gl.buffer_data_u8_slice(glow::COPY_READ_BUFFER, &zeroes, glow::STATIC_DRAW); + unsafe { gl.buffer_data_u8_slice(glow::COPY_READ_BUFFER, &zeroes, glow::STATIC_DRAW) }; // Compile the shader program we use for doing manual clears to work around Mesa fastclear // bug. 
let (shader_clear_program, shader_clear_program_color_uniform_location) = - Self::create_shader_clear_program(gl); + unsafe { Self::create_shader_clear_program(gl) }; Ok(crate::OpenDevice { device: super::Device { @@ -640,11 +642,9 @@ impl crate::Adapter for super::Adapter { queue: super::Queue { shared: Arc::clone(&self.shared), features, - draw_fbo: gl - .create_framebuffer() + draw_fbo: unsafe { gl.create_framebuffer() } .map_err(|_| crate::DeviceError::OutOfMemory)?, - copy_fbo: gl - .create_framebuffer() + copy_fbo: unsafe { gl.create_framebuffer() } .map_err(|_| crate::DeviceError::OutOfMemory)?, shader_clear_program, shader_clear_program_color_uniform_location, @@ -664,11 +664,12 @@ impl crate::Adapter for super::Adapter { use wgt::TextureFormat as Tf; let sample_count = { - let max_samples = self - .shared - .context - .lock() - .get_parameter_i32(glow::MAX_SAMPLES); + let max_samples = unsafe { + self.shared + .context + .lock() + .get_parameter_i32(glow::MAX_SAMPLES) + }; if max_samples >= 8 { Tfc::MULTISAMPLE_X2 | Tfc::MULTISAMPLE_X4 | Tfc::MULTISAMPLE_X8 } else if max_samples >= 4 { @@ -865,16 +866,16 @@ impl super::AdapterShared { .private_caps .contains(super::PrivateCapabilities::GET_BUFFER_SUB_DATA) { - gl.get_buffer_sub_data(target, offset, dst_data); + unsafe { gl.get_buffer_sub_data(target, offset, dst_data) }; } else { log::error!("Fake map"); let length = dst_data.len(); let buffer_mapping = - gl.map_buffer_range(target, offset, length as _, glow::MAP_READ_BIT); + unsafe { gl.map_buffer_range(target, offset, length as _, glow::MAP_READ_BIT) }; - std::ptr::copy_nonoverlapping(buffer_mapping, dst_data.as_mut_ptr(), length); + unsafe { std::ptr::copy_nonoverlapping(buffer_mapping, dst_data.as_mut_ptr(), length) }; - gl.unmap_buffer(target); + unsafe { gl.unmap_buffer(target) }; } } } diff --git a/wgpu-hal/src/gles/device.rs b/wgpu-hal/src/gles/device.rs index 196894c0a2..116e228f4c 100644 --- a/wgpu-hal/src/gles/device.rs +++ 
b/wgpu-hal/src/gles/device.rs @@ -168,21 +168,21 @@ impl super::Device { naga::ShaderStage::Compute => glow::COMPUTE_SHADER, }; - let raw = gl.create_shader(target).unwrap(); + let raw = unsafe { gl.create_shader(target) }.unwrap(); #[cfg(not(target_arch = "wasm32"))] if gl.supports_debug() { //TODO: remove all transmutes from `object_label` // https://github.com/grovesNL/glow/issues/186 - gl.object_label(glow::SHADER, mem::transmute(raw), label); + unsafe { gl.object_label(glow::SHADER, mem::transmute(raw), label) }; } - gl.shader_source(raw, shader); - gl.compile_shader(raw); + unsafe { gl.shader_source(raw, shader) }; + unsafe { gl.compile_shader(raw) }; log::info!("\tCompiled shader {:?}", raw); - let compiled_ok = gl.get_shader_compile_status(raw); - let msg = gl.get_shader_info_log(raw); + let compiled_ok = unsafe { gl.get_shader_compile_status(raw) }; + let msg = unsafe { gl.get_shader_info_log(raw) }; if compiled_ok { if !msg.is_empty() { log::warn!("\tCompile: {}", msg); @@ -272,11 +272,11 @@ impl super::Device { #[cfg_attr(target_arch = "wasm32", allow(unused))] label: Option<&str>, multiview: Option, ) -> Result { - let program = gl.create_program().unwrap(); + let program = unsafe { gl.create_program() }.unwrap(); #[cfg(not(target_arch = "wasm32"))] if let Some(label) = label { if gl.supports_debug() { - gl.object_label(glow::PROGRAM, mem::transmute(program), Some(label)); + unsafe { gl.object_label(glow::PROGRAM, mem::transmute(program), Some(label)) }; } } @@ -306,28 +306,30 @@ impl super::Device { }; let shader_src = format!("#version {} es \n void main(void) {{}}", version,); log::info!("Only vertex shader is present. 
Creating an empty fragment shader",); - let shader = Self::compile_shader( - gl, - &shader_src, - naga::ShaderStage::Fragment, - Some("(wgpu internal) dummy fragment shader"), - )?; + let shader = unsafe { + Self::compile_shader( + gl, + &shader_src, + naga::ShaderStage::Fragment, + Some("(wgpu internal) dummy fragment shader"), + ) + }?; shaders_to_delete.push(shader); } for &shader in shaders_to_delete.iter() { - gl.attach_shader(program, shader); + unsafe { gl.attach_shader(program, shader) }; } - gl.link_program(program); + unsafe { gl.link_program(program) }; for shader in shaders_to_delete { - gl.delete_shader(shader); + unsafe { gl.delete_shader(shader) }; } log::info!("\tLinked program {:?}", program); - let linked_ok = gl.get_program_link_status(program); - let msg = gl.get_program_info_log(program); + let linked_ok = unsafe { gl.get_program_link_status(program) }; + let msg = unsafe { gl.get_program_info_log(program) }; if !linked_ok { return Err(crate::PipelineError::Linkage(has_stages, msg)); } @@ -342,16 +344,17 @@ impl super::Device { { // This remapping is only needed if we aren't able to put the binding layout // in the shader. We can't remap storage buffers this way. 
- gl.use_program(Some(program)); + unsafe { gl.use_program(Some(program)) }; for (ref name, (register, slot)) in name_binding_map { log::trace!("Get binding {:?} from program {:?}", name, program); match register { super::BindingRegister::UniformBuffers => { - let index = gl.get_uniform_block_index(program, name).unwrap(); - gl.uniform_block_binding(program, index, slot as _); + let index = unsafe { gl.get_uniform_block_index(program, name) }.unwrap(); + unsafe { gl.uniform_block_binding(program, index, slot as _) }; } super::BindingRegister::StorageBuffers => { - let index = gl.get_shader_storage_block_index(program, name).unwrap(); + let index = + unsafe { gl.get_shader_storage_block_index(program, name) }.unwrap(); log::error!( "Unable to re-map shader storage block {} to {}", name, @@ -360,28 +363,30 @@ impl super::Device { return Err(crate::DeviceError::Lost.into()); } super::BindingRegister::Textures | super::BindingRegister::Images => { - gl.uniform_1_i32( - gl.get_uniform_location(program, name).as_ref(), - slot as _, - ); + unsafe { + gl.uniform_1_i32( + gl.get_uniform_location(program, name).as_ref(), + slot as _, + ) + }; } } } } let mut uniforms: [super::UniformDesc; super::MAX_PUSH_CONSTANTS] = Default::default(); - let count = gl.get_active_uniforms(program); + let count = unsafe { gl.get_active_uniforms(program) }; let mut offset = 0; for uniform in 0..count { let glow::ActiveUniform { utype, name, .. 
} = - gl.get_active_uniform(program, uniform).unwrap(); + unsafe { gl.get_active_uniform(program, uniform) }.unwrap(); if conv::is_sampler(utype) { continue; } - if let Some(location) = gl.get_uniform_location(program, &name) { + if let Some(location) = unsafe { gl.get_uniform_location(program, &name) } { if uniforms[offset / 4].location.is_some() { panic!("Offset already occupied") } @@ -410,10 +415,10 @@ impl super::Device { impl crate::Device for super::Device { unsafe fn exit(self, queue: super::Queue) { let gl = &self.shared.context.lock(); - gl.delete_vertex_array(self.main_vao); - gl.delete_framebuffer(queue.draw_fbo); - gl.delete_framebuffer(queue.copy_fbo); - gl.delete_buffer(queue.zero_buffer); + unsafe { gl.delete_vertex_array(self.main_vao) }; + unsafe { gl.delete_framebuffer(queue.draw_fbo) }; + unsafe { gl.delete_framebuffer(queue.copy_fbo) }; + unsafe { gl.delete_buffer(queue.zero_buffer) }; } unsafe fn create_buffer( @@ -468,8 +473,8 @@ impl crate::Device for super::Device { map_flags |= glow::MAP_WRITE_BIT; } - let raw = Some(gl.create_buffer().unwrap()); - gl.bind_buffer(target, raw); + let raw = Some(unsafe { gl.create_buffer() }.unwrap()); + unsafe { gl.bind_buffer(target, raw) }; let raw_size = desc .size .try_into() @@ -486,7 +491,7 @@ impl crate::Device for super::Device { map_flags |= glow::MAP_COHERENT_BIT; } } - gl.buffer_storage(target, raw_size, None, map_flags); + unsafe { gl.buffer_storage(target, raw_size, None, map_flags) }; } else { assert!(!is_coherent); let usage = if is_host_visible { @@ -498,10 +503,10 @@ impl crate::Device for super::Device { } else { glow::STATIC_DRAW }; - gl.buffer_data_size(target, raw_size, usage); + unsafe { gl.buffer_data_size(target, raw_size, usage) }; } - gl.bind_buffer(target, None); + unsafe { gl.bind_buffer(target, None) }; if !is_coherent && desc.usage.contains(crate::BufferUses::MAP_WRITE) { map_flags |= glow::MAP_FLUSH_EXPLICIT_BIT; @@ -511,7 +516,7 @@ impl crate::Device for super::Device { 
#[cfg(not(target_arch = "wasm32"))] if let Some(label) = desc.label { if gl.supports_debug() { - gl.object_label(glow::BUFFER, mem::transmute(raw), Some(label)); + unsafe { gl.object_label(glow::BUFFER, mem::transmute(raw), Some(label)) }; } } @@ -532,7 +537,7 @@ impl crate::Device for super::Device { unsafe fn destroy_buffer(&self, buffer: super::Buffer) { if let Some(raw) = buffer.raw { let gl = &self.shared.context.lock(); - gl.delete_buffer(raw); + unsafe { gl.delete_buffer(raw) }; } } @@ -550,21 +555,23 @@ impl crate::Device for super::Device { } Some(raw) => { let gl = &self.shared.context.lock(); - gl.bind_buffer(buffer.target, Some(raw)); + unsafe { gl.bind_buffer(buffer.target, Some(raw)) }; let ptr = if let Some(ref map_read_allocation) = buffer.data { let mut guard = map_read_allocation.lock().unwrap(); let slice = guard.as_mut_slice(); - self.shared.get_buffer_sub_data(gl, buffer.target, 0, slice); + unsafe { self.shared.get_buffer_sub_data(gl, buffer.target, 0, slice) }; slice.as_mut_ptr() } else { - gl.map_buffer_range( - buffer.target, - range.start as i32, - (range.end - range.start) as i32, - buffer.map_flags, - ) + unsafe { + gl.map_buffer_range( + buffer.target, + range.start as i32, + (range.end - range.start) as i32, + buffer.map_flags, + ) + } }; - gl.bind_buffer(buffer.target, None); + unsafe { gl.bind_buffer(buffer.target, None) }; ptr } }; @@ -577,9 +584,9 @@ impl crate::Device for super::Device { if let Some(raw) = buffer.raw { if buffer.data.is_none() { let gl = &self.shared.context.lock(); - gl.bind_buffer(buffer.target, Some(raw)); - gl.unmap_buffer(buffer.target); - gl.bind_buffer(buffer.target, None); + unsafe { gl.bind_buffer(buffer.target, Some(raw)) }; + unsafe { gl.unmap_buffer(buffer.target) }; + unsafe { gl.bind_buffer(buffer.target, None) }; } } Ok(()) @@ -590,13 +597,15 @@ impl crate::Device for super::Device { { if let Some(raw) = buffer.raw { let gl = &self.shared.context.lock(); - gl.bind_buffer(buffer.target, Some(raw)); + 
unsafe { gl.bind_buffer(buffer.target, Some(raw)) }; for range in ranges { - gl.flush_mapped_buffer_range( - buffer.target, - range.start as i32, - (range.end - range.start) as i32, - ); + unsafe { + gl.flush_mapped_buffer_range( + buffer.target, + range.start as i32, + (range.end - range.start) as i32, + ) + }; } } } @@ -625,89 +634,105 @@ impl crate::Device for super::Device { && desc.dimension == wgt::TextureDimension::D2 && desc.size.depth_or_array_layers == 1 { - let raw = gl.create_renderbuffer().unwrap(); - gl.bind_renderbuffer(glow::RENDERBUFFER, Some(raw)); + let raw = unsafe { gl.create_renderbuffer().unwrap() }; + unsafe { gl.bind_renderbuffer(glow::RENDERBUFFER, Some(raw)) }; if desc.sample_count > 1 { - gl.renderbuffer_storage_multisample( - glow::RENDERBUFFER, - desc.sample_count as i32, - format_desc.internal, - desc.size.width as i32, - desc.size.height as i32, - ); + unsafe { + gl.renderbuffer_storage_multisample( + glow::RENDERBUFFER, + desc.sample_count as i32, + format_desc.internal, + desc.size.width as i32, + desc.size.height as i32, + ) + }; } else { - gl.renderbuffer_storage( - glow::RENDERBUFFER, - format_desc.internal, - desc.size.width as i32, - desc.size.height as i32, - ); + unsafe { + gl.renderbuffer_storage( + glow::RENDERBUFFER, + format_desc.internal, + desc.size.width as i32, + desc.size.height as i32, + ) + }; } #[cfg(not(target_arch = "wasm32"))] if let Some(label) = desc.label { if gl.supports_debug() { - gl.object_label(glow::RENDERBUFFER, mem::transmute(raw), Some(label)); + unsafe { + gl.object_label(glow::RENDERBUFFER, mem::transmute(raw), Some(label)) + }; } } - gl.bind_renderbuffer(glow::RENDERBUFFER, None); + unsafe { gl.bind_renderbuffer(glow::RENDERBUFFER, None) }; (super::TextureInner::Renderbuffer { raw }, false) } else { - let raw = gl.create_texture().unwrap(); + let raw = unsafe { gl.create_texture().unwrap() }; let (target, is_3d, is_cubemap) = super::Texture::get_info_from_desc(&mut copy_size, desc); - 
gl.bind_texture(target, Some(raw)); + unsafe { gl.bind_texture(target, Some(raw)) }; //Note: this has to be done before defining the storage! match desc.format.describe().sample_type { wgt::TextureSampleType::Float { filterable: false } | wgt::TextureSampleType::Uint | wgt::TextureSampleType::Sint => { // reset default filtering mode - gl.tex_parameter_i32(target, glow::TEXTURE_MIN_FILTER, glow::NEAREST as i32); - gl.tex_parameter_i32(target, glow::TEXTURE_MAG_FILTER, glow::NEAREST as i32); + unsafe { + gl.tex_parameter_i32(target, glow::TEXTURE_MIN_FILTER, glow::NEAREST as i32) + }; + unsafe { + gl.tex_parameter_i32(target, glow::TEXTURE_MAG_FILTER, glow::NEAREST as i32) + }; } wgt::TextureSampleType::Float { filterable: true } | wgt::TextureSampleType::Depth => {} } if is_3d { - gl.tex_storage_3d( - target, - desc.mip_level_count as i32, - format_desc.internal, - desc.size.width as i32, - desc.size.height as i32, - desc.size.depth_or_array_layers as i32, - ); + unsafe { + gl.tex_storage_3d( + target, + desc.mip_level_count as i32, + format_desc.internal, + desc.size.width as i32, + desc.size.height as i32, + desc.size.depth_or_array_layers as i32, + ) + }; } else if desc.sample_count > 1 { - gl.tex_storage_2d_multisample( - target, - desc.sample_count as i32, - format_desc.internal, - desc.size.width as i32, - desc.size.height as i32, - true, - ); + unsafe { + gl.tex_storage_2d_multisample( + target, + desc.sample_count as i32, + format_desc.internal, + desc.size.width as i32, + desc.size.height as i32, + true, + ) + }; } else { - gl.tex_storage_2d( - target, - desc.mip_level_count as i32, - format_desc.internal, - desc.size.width as i32, - desc.size.height as i32, - ); + unsafe { + gl.tex_storage_2d( + target, + desc.mip_level_count as i32, + format_desc.internal, + desc.size.width as i32, + desc.size.height as i32, + ) + }; } #[cfg(not(target_arch = "wasm32"))] if let Some(label) = desc.label { if gl.supports_debug() { - gl.object_label(glow::TEXTURE, 
mem::transmute(raw), Some(label)); + unsafe { gl.object_label(glow::TEXTURE, mem::transmute(raw), Some(label)) }; } } - gl.bind_texture(target, None); + unsafe { gl.bind_texture(target, None) }; (super::TextureInner::Texture { raw, target }, is_cubemap) }; @@ -731,11 +756,11 @@ impl crate::Device for super::Device { let gl = &self.shared.context.lock(); match texture.inner { super::TextureInner::Renderbuffer { raw, .. } => { - gl.delete_renderbuffer(raw); + unsafe { gl.delete_renderbuffer(raw) }; } super::TextureInner::DefaultRenderbuffer => {} super::TextureInner::Texture { raw, .. } => { - gl.delete_texture(raw); + unsafe { gl.delete_texture(raw) }; } } } @@ -777,29 +802,35 @@ impl crate::Device for super::Device { ) -> Result { let gl = &self.shared.context.lock(); - let raw = gl.create_sampler().unwrap(); + let raw = unsafe { gl.create_sampler().unwrap() }; let (min, mag) = conv::map_filter_modes(desc.min_filter, desc.mag_filter, desc.mipmap_filter); - gl.sampler_parameter_i32(raw, glow::TEXTURE_MIN_FILTER, min as i32); - gl.sampler_parameter_i32(raw, glow::TEXTURE_MAG_FILTER, mag as i32); + unsafe { gl.sampler_parameter_i32(raw, glow::TEXTURE_MIN_FILTER, min as i32) }; + unsafe { gl.sampler_parameter_i32(raw, glow::TEXTURE_MAG_FILTER, mag as i32) }; - gl.sampler_parameter_i32( - raw, - glow::TEXTURE_WRAP_S, - conv::map_address_mode(desc.address_modes[0]) as i32, - ); - gl.sampler_parameter_i32( - raw, - glow::TEXTURE_WRAP_T, - conv::map_address_mode(desc.address_modes[1]) as i32, - ); - gl.sampler_parameter_i32( - raw, - glow::TEXTURE_WRAP_R, - conv::map_address_mode(desc.address_modes[2]) as i32, - ); + unsafe { + gl.sampler_parameter_i32( + raw, + glow::TEXTURE_WRAP_S, + conv::map_address_mode(desc.address_modes[0]) as i32, + ) + }; + unsafe { + gl.sampler_parameter_i32( + raw, + glow::TEXTURE_WRAP_T, + conv::map_address_mode(desc.address_modes[1]) as i32, + ) + }; + unsafe { + gl.sampler_parameter_i32( + raw, + glow::TEXTURE_WRAP_R, + 
conv::map_address_mode(desc.address_modes[2]) as i32, + ) + }; if let Some(border_color) = desc.border_color { let border = match border_color { @@ -809,37 +840,43 @@ impl crate::Device for super::Device { wgt::SamplerBorderColor::OpaqueBlack => [0.0, 0.0, 0.0, 1.0], wgt::SamplerBorderColor::OpaqueWhite => [1.0; 4], }; - gl.sampler_parameter_f32_slice(raw, glow::TEXTURE_BORDER_COLOR, &border); + unsafe { gl.sampler_parameter_f32_slice(raw, glow::TEXTURE_BORDER_COLOR, &border) }; } if let Some(ref range) = desc.lod_clamp { - gl.sampler_parameter_f32(raw, glow::TEXTURE_MIN_LOD, range.start); - gl.sampler_parameter_f32(raw, glow::TEXTURE_MAX_LOD, range.end); + unsafe { gl.sampler_parameter_f32(raw, glow::TEXTURE_MIN_LOD, range.start) }; + unsafe { gl.sampler_parameter_f32(raw, glow::TEXTURE_MAX_LOD, range.end) }; } if let Some(anisotropy) = desc.anisotropy_clamp { - gl.sampler_parameter_i32(raw, glow::TEXTURE_MAX_ANISOTROPY, anisotropy.get() as i32); + unsafe { + gl.sampler_parameter_i32(raw, glow::TEXTURE_MAX_ANISOTROPY, anisotropy.get() as i32) + }; } //set_param_float(glow::TEXTURE_LOD_BIAS, info.lod_bias.0); if let Some(compare) = desc.compare { - gl.sampler_parameter_i32( - raw, - glow::TEXTURE_COMPARE_MODE, - glow::COMPARE_REF_TO_TEXTURE as i32, - ); - gl.sampler_parameter_i32( - raw, - glow::TEXTURE_COMPARE_FUNC, - conv::map_compare_func(compare) as i32, - ); + unsafe { + gl.sampler_parameter_i32( + raw, + glow::TEXTURE_COMPARE_MODE, + glow::COMPARE_REF_TO_TEXTURE as i32, + ) + }; + unsafe { + gl.sampler_parameter_i32( + raw, + glow::TEXTURE_COMPARE_FUNC, + conv::map_compare_func(compare) as i32, + ) + }; } #[cfg(not(target_arch = "wasm32"))] if let Some(label) = desc.label { if gl.supports_debug() { - gl.object_label(glow::SAMPLER, mem::transmute(raw), Some(label)); + unsafe { gl.object_label(glow::SAMPLER, mem::transmute(raw), Some(label)) }; } } @@ -847,7 +884,7 @@ impl crate::Device for super::Device { } unsafe fn destroy_sampler(&self, sampler: 
super::Sampler) { let gl = &self.shared.context.lock(); - gl.delete_sampler(sampler.raw); + unsafe { gl.delete_sampler(sampler.raw) }; } unsafe fn create_command_encoder( @@ -1035,7 +1072,8 @@ impl crate::Device for super::Device { .as_ref() .map(|fs| (naga::ShaderStage::Fragment, fs)), ); - let inner = self.create_pipeline(gl, shaders, desc.layout, desc.label, desc.multiview)?; + let inner = + unsafe { self.create_pipeline(gl, shaders, desc.layout, desc.label, desc.multiview) }?; let (vertex_buffers, vertex_attributes) = { let mut buffers = Vec::new(); @@ -1095,7 +1133,7 @@ impl crate::Device for super::Device { } unsafe fn destroy_render_pipeline(&self, pipeline: super::RenderPipeline) { let gl = &self.shared.context.lock(); - gl.delete_program(pipeline.inner.program); + unsafe { gl.delete_program(pipeline.inner.program) }; } unsafe fn create_compute_pipeline( @@ -1104,13 +1142,13 @@ impl crate::Device for super::Device { ) -> Result { let gl = &self.shared.context.lock(); let shaders = iter::once((naga::ShaderStage::Compute, &desc.stage)); - let inner = self.create_pipeline(gl, shaders, desc.layout, desc.label, None)?; + let inner = unsafe { self.create_pipeline(gl, shaders, desc.layout, desc.label, None) }?; Ok(super::ComputePipeline { inner }) } unsafe fn destroy_compute_pipeline(&self, pipeline: super::ComputePipeline) { let gl = &self.shared.context.lock(); - gl.delete_program(pipeline.inner.program); + unsafe { gl.delete_program(pipeline.inner.program) }; } #[cfg_attr(target_arch = "wasm32", allow(unused))] @@ -1123,9 +1161,8 @@ impl crate::Device for super::Device { let mut queries = Vec::with_capacity(desc.count as usize); for i in 0..desc.count { - let query = gl - .create_query() - .map_err(|_| crate::DeviceError::OutOfMemory)?; + let query = + unsafe { gl.create_query() }.map_err(|_| crate::DeviceError::OutOfMemory)?; #[cfg(not(target_arch = "wasm32"))] if gl.supports_debug() { use std::fmt::Write; @@ -1133,7 +1170,9 @@ impl crate::Device for 
super::Device { if let Some(label) = desc.label { temp_string.clear(); let _ = write!(temp_string, "{}[{}]", label, i); - gl.object_label(glow::QUERY, mem::transmute(query), Some(&temp_string)); + unsafe { + gl.object_label(glow::QUERY, mem::transmute(query), Some(&temp_string)) + }; } } queries.push(query); @@ -1150,7 +1189,7 @@ impl crate::Device for super::Device { unsafe fn destroy_query_set(&self, set: super::QuerySet) { let gl = &self.shared.context.lock(); for &query in set.queries.iter() { - gl.delete_query(query); + unsafe { gl.delete_query(query) }; } } unsafe fn create_fence(&self) -> Result { @@ -1162,7 +1201,7 @@ impl crate::Device for super::Device { unsafe fn destroy_fence(&self, fence: super::Fence) { let gl = &self.shared.context.lock(); for (_, sync) in fence.pending { - gl.delete_sync(sync); + unsafe { gl.delete_sync(sync) }; } } unsafe fn get_fence_value( @@ -1190,7 +1229,9 @@ impl crate::Device for super::Device { .iter() .find(|&&(value, _)| value >= wait_value) .unwrap(); - match gl.client_wait_sync(sync, glow::SYNC_FLUSH_COMMANDS_BIT, timeout_ns as i32) { + match unsafe { + gl.client_wait_sync(sync, glow::SYNC_FLUSH_COMMANDS_BIT, timeout_ns as i32) + } { // for some reason firefox returns WAIT_FAILED, to investigate #[cfg(target_arch = "wasm32")] glow::WAIT_FAILED => { @@ -1208,16 +1249,19 @@ impl crate::Device for super::Device { unsafe fn start_capture(&self) -> bool { #[cfg(feature = "renderdoc")] - return self - .render_doc - .start_frame_capture(self.shared.context.raw_context(), ptr::null_mut()); + return unsafe { + self.render_doc + .start_frame_capture(self.shared.context.raw_context(), ptr::null_mut()) + }; #[allow(unreachable_code)] false } unsafe fn stop_capture(&self) { #[cfg(feature = "renderdoc")] - self.render_doc - .end_frame_capture(ptr::null_mut(), ptr::null_mut()) + unsafe { + self.render_doc + .end_frame_capture(ptr::null_mut(), ptr::null_mut()) + } } } diff --git a/wgpu-hal/src/gles/egl.rs b/wgpu-hal/src/gles/egl.rs 
index e2c2c01f9a..d813ae522e 100644 --- a/wgpu-hal/src/gles/egl.rs +++ b/wgpu-hal/src/gles/egl.rs @@ -94,11 +94,11 @@ unsafe extern "system" fn egl_debug_proc( EGL_DEBUG_MSG_INFO_KHR => log::Level::Info, _ => log::Level::Debug, }; - let command = ffi::CStr::from_ptr(command_raw).to_string_lossy(); + let command = unsafe { ffi::CStr::from_ptr(command_raw) }.to_string_lossy(); let message = if message_raw.is_null() { "".into() } else { - ffi::CStr::from_ptr(message_raw).to_string_lossy() + unsafe { ffi::CStr::from_ptr(message_raw) }.to_string_lossy() }; log::log!( @@ -122,7 +122,7 @@ fn open_x_display() -> Option<(ptr::NonNull, libloading::Library)> unsafe fn find_library(paths: &[&str]) -> Option { for path in paths { - match libloading::Library::new(path) { + match unsafe { libloading::Library::new(path) } { Ok(lib) => return Some(lib), _ => continue, }; @@ -629,11 +629,15 @@ impl crate::Instance for Instance { #[cfg(not(feature = "emscripten"))] let egl_result = if cfg!(windows) { - egl::DynamicInstance::::load_required_from_filename("libEGL.dll") + unsafe { + egl::DynamicInstance::::load_required_from_filename("libEGL.dll") + } } else if cfg!(any(target_os = "macos", target_os = "ios")) { - egl::DynamicInstance::::load_required_from_filename("libEGL.dylib") + unsafe { + egl::DynamicInstance::::load_required_from_filename("libEGL.dylib") + } } else { - egl::DynamicInstance::::load_required() + unsafe { egl::DynamicInstance::::load_required() } }; let egl = match egl_result { Ok(egl) => Arc::new(egl), @@ -734,8 +738,9 @@ impl crate::Instance for Instance { && client_ext_str.contains("EGL_KHR_debug") { log::info!("Enabling EGL debug output"); - let function: EglDebugMessageControlFun = - std::mem::transmute(egl.get_proc_address("eglDebugMessageControlKHR").unwrap()); + let function: EglDebugMessageControlFun = unsafe { + std::mem::transmute(egl.get_proc_address("eglDebugMessageControlKHR").unwrap()) + }; let attributes = [ EGL_DEBUG_MSG_CRITICAL_KHR as egl::Attrib, 
1, @@ -747,7 +752,7 @@ impl crate::Instance for Instance { 1, egl::ATTRIB_NONE, ]; - (function)(Some(egl_debug_proc), attributes.as_ptr()); + unsafe { (function)(Some(egl_debug_proc), attributes.as_ptr()) }; } let inner = Inner::create(desc.flags, egl, display)?; @@ -786,7 +791,9 @@ impl crate::Instance for Instance { .get_config_attrib(inner.egl.display, inner.config, egl::NATIVE_VISUAL_ID) .unwrap(); - let ret = ANativeWindow_setBuffersGeometry(handle.a_native_window, 0, 0, format); + let ret = unsafe { + ANativeWindow_setBuffersGeometry(handle.a_native_window, 0, 0, format) + }; if ret != 0 { log::error!("Error returned from ANativeWindow_setBuffersGeometry"); @@ -858,33 +865,36 @@ impl crate::Instance for Instance { let inner = self.inner.lock(); inner.egl.make_current(); - let gl = glow::Context::from_loader_function(|name| { - inner - .egl - .instance - .get_proc_address(name) - .map_or(ptr::null(), |p| p as *const _) - }); + let gl = unsafe { + glow::Context::from_loader_function(|name| { + inner + .egl + .instance + .get_proc_address(name) + .map_or(ptr::null(), |p| p as *const _) + }) + }; if self.flags.contains(crate::InstanceFlags::DEBUG) && gl.supports_debug() { - log::info!( - "Max label length: {}", + log::info!("Max label length: {}", unsafe { gl.get_parameter_i32(glow::MAX_LABEL_LENGTH) - ); + }); } if self.flags.contains(crate::InstanceFlags::VALIDATION) && gl.supports_debug() { log::info!("Enabling GLES debug output"); - gl.enable(glow::DEBUG_OUTPUT); - gl.debug_message_callback(gl_debug_message_callback); + unsafe { gl.enable(glow::DEBUG_OUTPUT) }; + unsafe { gl.debug_message_callback(gl_debug_message_callback) }; } inner.egl.unmake_current(); - super::Adapter::expose(AdapterContext { - glow: Mutex::new(gl), - egl: Some(inner.egl.clone()), - }) + unsafe { + super::Adapter::expose(AdapterContext { + glow: Mutex::new(gl), + egl: Some(inner.egl.clone()), + }) + } .into_iter() .collect() } @@ -901,10 +911,12 @@ impl super::Adapter { pub unsafe fn 
new_external( fun: impl FnMut(&str) -> *const ffi::c_void, ) -> Option> { - Self::expose(AdapterContext { - glow: Mutex::new(glow::Context::from_loader_function(fun)), - egl: None, - }) + unsafe { + Self::expose(AdapterContext { + glow: Mutex::new(glow::Context::from_loader_function(fun)), + egl: None, + }) + } } pub fn adapter_context(&self) -> &AdapterContext { @@ -968,27 +980,29 @@ impl Surface { crate::SurfaceError::Lost })?; - gl.disable(glow::SCISSOR_TEST); - gl.color_mask(true, true, true, true); + unsafe { gl.disable(glow::SCISSOR_TEST) }; + unsafe { gl.color_mask(true, true, true, true) }; - gl.bind_framebuffer(glow::DRAW_FRAMEBUFFER, None); - gl.bind_framebuffer(glow::READ_FRAMEBUFFER, Some(sc.framebuffer)); + unsafe { gl.bind_framebuffer(glow::DRAW_FRAMEBUFFER, None) }; + unsafe { gl.bind_framebuffer(glow::READ_FRAMEBUFFER, Some(sc.framebuffer)) }; // Note the Y-flipping here. GL's presentation is not flipped, // but main rendering is. Therefore, we Y-flip the output positions // in the shader, and also this blit. 
- gl.blit_framebuffer( - 0, - sc.extent.height as i32, - sc.extent.width as i32, - 0, - 0, - 0, - sc.extent.width as i32, - sc.extent.height as i32, - glow::COLOR_BUFFER_BIT, - glow::NEAREST, - ); - gl.bind_framebuffer(glow::READ_FRAMEBUFFER, None); + unsafe { + gl.blit_framebuffer( + 0, + sc.extent.height as i32, + sc.extent.width as i32, + 0, + 0, + 0, + sc.extent.width as i32, + sc.extent.height as i32, + glow::COLOR_BUFFER_BIT, + glow::NEAREST, + ) + }; + unsafe { gl.bind_framebuffer(glow::READ_FRAMEBUFFER, None) }; self.egl .instance @@ -1015,8 +1029,8 @@ impl Surface { let gl = &device.shared.context.lock(); match self.swapchain.take() { Some(sc) => { - gl.delete_renderbuffer(sc.renderbuffer); - gl.delete_framebuffer(sc.framebuffer); + unsafe { gl.delete_renderbuffer(sc.renderbuffer) }; + unsafe { gl.delete_framebuffer(sc.framebuffer) }; Some((sc.surface, sc.wl_window)) } None => None, @@ -1039,7 +1053,7 @@ impl crate::Surface for Surface { ) -> Result<(), crate::SurfaceError> { use raw_window_handle::RawWindowHandle as Rwh; - let (surface, wl_window) = match self.unconfigure_impl(device) { + let (surface, wl_window) = match unsafe { self.unconfigure_impl(device) } { Some(pair) => pair, None => { let mut wl_window = None; @@ -1064,9 +1078,9 @@ impl crate::Surface for Surface { (WindowKind::Wayland, Rwh::Wayland(handle)) => { let library = self.wsi.library.as_ref().unwrap(); let wl_egl_window_create: libloading::Symbol = - library.get(b"wl_egl_window_create").unwrap(); - let window = wl_egl_window_create(handle.surface, 640, 480) as *mut _ - as *mut std::ffi::c_void; + unsafe { library.get(b"wl_egl_window_create") }.unwrap(); + let window = unsafe { wl_egl_window_create(handle.surface, 640, 480) } + as *mut _ as *mut std::ffi::c_void; wl_window = Some(window); window } @@ -1143,12 +1157,14 @@ impl crate::Surface for Surface { &attributes_usize, ) } - _ => self.egl.instance.create_window_surface( - self.egl.display, - self.config, - native_window_ptr, - 
Some(&attributes), - ), + _ => unsafe { + self.egl.instance.create_window_surface( + self.egl.display, + self.config, + native_window_ptr, + Some(&attributes), + ) + }, }; match raw_result { @@ -1164,36 +1180,42 @@ impl crate::Surface for Surface { if let Some(window) = wl_window { let library = self.wsi.library.as_ref().unwrap(); let wl_egl_window_resize: libloading::Symbol = - library.get(b"wl_egl_window_resize").unwrap(); - wl_egl_window_resize( - window, - config.extent.width as i32, - config.extent.height as i32, - 0, - 0, - ); + unsafe { library.get(b"wl_egl_window_resize") }.unwrap(); + unsafe { + wl_egl_window_resize( + window, + config.extent.width as i32, + config.extent.height as i32, + 0, + 0, + ) + }; } let format_desc = device.shared.describe_texture_format(config.format); let gl = &device.shared.context.lock(); - let renderbuffer = gl.create_renderbuffer().unwrap(); - gl.bind_renderbuffer(glow::RENDERBUFFER, Some(renderbuffer)); - gl.renderbuffer_storage( - glow::RENDERBUFFER, - format_desc.internal, - config.extent.width as _, - config.extent.height as _, - ); - let framebuffer = gl.create_framebuffer().unwrap(); - gl.bind_framebuffer(glow::READ_FRAMEBUFFER, Some(framebuffer)); - gl.framebuffer_renderbuffer( - glow::READ_FRAMEBUFFER, - glow::COLOR_ATTACHMENT0, - glow::RENDERBUFFER, - Some(renderbuffer), - ); - gl.bind_renderbuffer(glow::RENDERBUFFER, None); - gl.bind_framebuffer(glow::READ_FRAMEBUFFER, None); + let renderbuffer = unsafe { gl.create_renderbuffer() }.unwrap(); + unsafe { gl.bind_renderbuffer(glow::RENDERBUFFER, Some(renderbuffer)) }; + unsafe { + gl.renderbuffer_storage( + glow::RENDERBUFFER, + format_desc.internal, + config.extent.width as _, + config.extent.height as _, + ) + }; + let framebuffer = unsafe { gl.create_framebuffer() }.unwrap(); + unsafe { gl.bind_framebuffer(glow::READ_FRAMEBUFFER, Some(framebuffer)) }; + unsafe { + gl.framebuffer_renderbuffer( + glow::READ_FRAMEBUFFER, + glow::COLOR_ATTACHMENT0, + glow::RENDERBUFFER, 
+ Some(renderbuffer), + ) + }; + unsafe { gl.bind_renderbuffer(glow::RENDERBUFFER, None) }; + unsafe { gl.bind_framebuffer(glow::READ_FRAMEBUFFER, None) }; self.swapchain = Some(Swapchain { surface, @@ -1210,20 +1232,21 @@ impl crate::Surface for Surface { } unsafe fn unconfigure(&mut self, device: &super::Device) { - if let Some((surface, wl_window)) = self.unconfigure_impl(device) { + if let Some((surface, wl_window)) = unsafe { self.unconfigure_impl(device) } { self.egl .instance .destroy_surface(self.egl.display, surface) .unwrap(); if let Some(window) = wl_window { - let wl_egl_window_destroy: libloading::Symbol = self - .wsi - .library - .as_ref() - .expect("unsupported window") - .get(b"wl_egl_window_destroy") - .unwrap(); - wl_egl_window_destroy(window); + let wl_egl_window_destroy: libloading::Symbol = unsafe { + self.wsi + .library + .as_ref() + .expect("unsupported window") + .get(b"wl_egl_window_destroy") + } + .unwrap(); + unsafe { wl_egl_window_destroy(window) }; } } } diff --git a/wgpu-hal/src/gles/queue.rs b/wgpu-hal/src/gles/queue.rs index 35e55befca..75770c501c 100644 --- a/wgpu-hal/src/gles/queue.rs +++ b/wgpu-hal/src/gles/queue.rs @@ -30,49 +30,51 @@ fn is_layered_target(target: super::BindTarget) -> bool { impl super::Queue { /// Performs a manual shader clear, used as a workaround for a clearing bug on mesa unsafe fn perform_shader_clear(&self, gl: &glow::Context, draw_buffer: u32, color: [f32; 4]) { - gl.use_program(Some(self.shader_clear_program)); - gl.uniform_4_f32( - Some(&self.shader_clear_program_color_uniform_location), - color[0], - color[1], - color[2], - color[3], - ); - gl.disable(glow::DEPTH_TEST); - gl.disable(glow::STENCIL_TEST); - gl.disable(glow::SCISSOR_TEST); - gl.disable(glow::BLEND); - gl.disable(glow::CULL_FACE); - gl.draw_buffers(&[glow::COLOR_ATTACHMENT0 + draw_buffer]); - gl.draw_arrays(glow::TRIANGLES, 0, 3); + unsafe { gl.use_program(Some(self.shader_clear_program)) }; + unsafe { + gl.uniform_4_f32( + 
Some(&self.shader_clear_program_color_uniform_location), + color[0], + color[1], + color[2], + color[3], + ) + }; + unsafe { gl.disable(glow::DEPTH_TEST) }; + unsafe { gl.disable(glow::STENCIL_TEST) }; + unsafe { gl.disable(glow::SCISSOR_TEST) }; + unsafe { gl.disable(glow::BLEND) }; + unsafe { gl.disable(glow::CULL_FACE) }; + unsafe { gl.draw_buffers(&[glow::COLOR_ATTACHMENT0 + draw_buffer]) }; + unsafe { gl.draw_arrays(glow::TRIANGLES, 0, 3) }; if self.draw_buffer_count != 0 { // Reset the draw buffers to what they were before the clear let indices = (0..self.draw_buffer_count as u32) .map(|i| glow::COLOR_ATTACHMENT0 + i) .collect::>(); - gl.draw_buffers(&indices); + unsafe { gl.draw_buffers(&indices) }; } #[cfg(not(target_arch = "wasm32"))] for draw_buffer in 0..self.draw_buffer_count as u32 { - gl.disable_draw_buffer(glow::BLEND, draw_buffer); + unsafe { gl.disable_draw_buffer(glow::BLEND, draw_buffer) }; } } unsafe fn reset_state(&mut self, gl: &glow::Context) { - gl.use_program(None); - gl.bind_framebuffer(glow::FRAMEBUFFER, None); - gl.disable(glow::DEPTH_TEST); - gl.disable(glow::STENCIL_TEST); - gl.disable(glow::SCISSOR_TEST); - gl.disable(glow::BLEND); - gl.disable(glow::CULL_FACE); - gl.disable(glow::POLYGON_OFFSET_FILL); + unsafe { gl.use_program(None) }; + unsafe { gl.bind_framebuffer(glow::FRAMEBUFFER, None) }; + unsafe { gl.disable(glow::DEPTH_TEST) }; + unsafe { gl.disable(glow::STENCIL_TEST) }; + unsafe { gl.disable(glow::SCISSOR_TEST) }; + unsafe { gl.disable(glow::BLEND) }; + unsafe { gl.disable(glow::CULL_FACE) }; + unsafe { gl.disable(glow::POLYGON_OFFSET_FILL) }; if self.features.contains(wgt::Features::DEPTH_CLIP_CONTROL) { - gl.disable(glow::DEPTH_CLAMP); + unsafe { gl.disable(glow::DEPTH_CLAMP) }; } - gl.bind_buffer(glow::ELEMENT_ARRAY_BUFFER, None); + unsafe { gl.bind_buffer(glow::ELEMENT_ARRAY_BUFFER, None) }; self.current_index_buffer = None; } @@ -85,45 +87,60 @@ impl super::Queue { ) { match view.inner { 
super::TextureInner::Renderbuffer { raw } => { - gl.framebuffer_renderbuffer(fbo_target, attachment, glow::RENDERBUFFER, Some(raw)); + unsafe { + gl.framebuffer_renderbuffer( + fbo_target, + attachment, + glow::RENDERBUFFER, + Some(raw), + ) + }; } super::TextureInner::DefaultRenderbuffer => panic!("Unexpected default RBO"), super::TextureInner::Texture { raw, target } => { let num_layers = view.array_layers.end - view.array_layers.start; if num_layers > 1 { #[cfg(all(target_arch = "wasm32", target_os = "unknown"))] - gl.framebuffer_texture_multiview_ovr( - fbo_target, - attachment, - Some(raw), - view.mip_levels.start as i32, - view.array_layers.start as i32, - num_layers as i32, - ); + unsafe { + gl.framebuffer_texture_multiview_ovr( + fbo_target, + attachment, + Some(raw), + view.mip_levels.start as i32, + view.array_layers.start as i32, + num_layers as i32, + ) + }; } else if is_layered_target(target) { - gl.framebuffer_texture_layer( - fbo_target, - attachment, - Some(raw), - view.mip_levels.start as i32, - view.array_layers.start as i32, - ); + unsafe { + gl.framebuffer_texture_layer( + fbo_target, + attachment, + Some(raw), + view.mip_levels.start as i32, + view.array_layers.start as i32, + ) + }; } else if target == glow::TEXTURE_CUBE_MAP { - gl.framebuffer_texture_2d( - fbo_target, - attachment, - CUBEMAP_FACES[view.array_layers.start as usize], - Some(raw), - view.mip_levels.start as i32, - ); + unsafe { + gl.framebuffer_texture_2d( + fbo_target, + attachment, + CUBEMAP_FACES[view.array_layers.start as usize], + Some(raw), + view.mip_levels.start as i32, + ) + }; } else { - gl.framebuffer_texture_2d( - fbo_target, - attachment, - target, - Some(raw), - view.mip_levels.start as i32, - ); + unsafe { + gl.framebuffer_texture_2d( + fbo_target, + attachment, + target, + Some(raw), + view.mip_levels.start as i32, + ) + }; } } } @@ -144,14 +161,16 @@ impl super::Queue { instance_count, } => { if instance_count == 1 { - gl.draw_arrays(topology, start_vertex as 
i32, vertex_count as i32); + unsafe { gl.draw_arrays(topology, start_vertex as i32, vertex_count as i32) }; } else { - gl.draw_arrays_instanced( - topology, - start_vertex as i32, - vertex_count as i32, - instance_count as i32, - ); + unsafe { + gl.draw_arrays_instanced( + topology, + start_vertex as i32, + vertex_count as i32, + instance_count as i32, + ) + }; } } C::DrawIndexed { @@ -162,42 +181,50 @@ impl super::Queue { base_vertex, instance_count, } => match (base_vertex, instance_count) { - (0, 1) => gl.draw_elements( - topology, - index_count as i32, - index_type, - index_offset as i32, - ), - (0, _) => gl.draw_elements_instanced( - topology, - index_count as i32, - index_type, - index_offset as i32, - instance_count as i32, - ), - (_, 1) => gl.draw_elements_base_vertex( - topology, - index_count as i32, - index_type, - index_offset as i32, - base_vertex, - ), - (_, _) => gl.draw_elements_instanced_base_vertex( - topology, - index_count as _, - index_type, - index_offset as i32, - instance_count as i32, - base_vertex, - ), + (0, 1) => unsafe { + gl.draw_elements( + topology, + index_count as i32, + index_type, + index_offset as i32, + ) + }, + (0, _) => unsafe { + gl.draw_elements_instanced( + topology, + index_count as i32, + index_type, + index_offset as i32, + instance_count as i32, + ) + }, + (_, 1) => unsafe { + gl.draw_elements_base_vertex( + topology, + index_count as i32, + index_type, + index_offset as i32, + base_vertex, + ) + }, + (_, _) => unsafe { + gl.draw_elements_instanced_base_vertex( + topology, + index_count as _, + index_type, + index_offset as i32, + instance_count as i32, + base_vertex, + ) + }, }, C::DrawIndirect { topology, indirect_buf, indirect_offset, } => { - gl.bind_buffer(glow::DRAW_INDIRECT_BUFFER, Some(indirect_buf)); - gl.draw_arrays_indirect_offset(topology, indirect_offset as i32); + unsafe { gl.bind_buffer(glow::DRAW_INDIRECT_BUFFER, Some(indirect_buf)) }; + unsafe { gl.draw_arrays_indirect_offset(topology, indirect_offset 
as i32) }; } C::DrawIndexedIndirect { topology, @@ -205,18 +232,20 @@ impl super::Queue { indirect_buf, indirect_offset, } => { - gl.bind_buffer(glow::DRAW_INDIRECT_BUFFER, Some(indirect_buf)); - gl.draw_elements_indirect_offset(topology, index_type, indirect_offset as i32); + unsafe { gl.bind_buffer(glow::DRAW_INDIRECT_BUFFER, Some(indirect_buf)) }; + unsafe { + gl.draw_elements_indirect_offset(topology, index_type, indirect_offset as i32) + }; } C::Dispatch(group_counts) => { - gl.dispatch_compute(group_counts[0], group_counts[1], group_counts[2]); + unsafe { gl.dispatch_compute(group_counts[0], group_counts[1], group_counts[2]) }; } C::DispatchIndirect { indirect_buf, indirect_offset, } => { - gl.bind_buffer(glow::DISPATCH_INDIRECT_BUFFER, Some(indirect_buf)); - gl.dispatch_compute_indirect(indirect_offset as i32); + unsafe { gl.bind_buffer(glow::DISPATCH_INDIRECT_BUFFER, Some(indirect_buf)) }; + unsafe { gl.dispatch_compute_indirect(indirect_offset as i32) }; } C::ClearBuffer { ref dst, @@ -239,24 +268,28 @@ impl super::Queue { || dst_target != glow::ELEMENT_ARRAY_BUFFER; if can_use_zero_buffer { - gl.bind_buffer(glow::COPY_READ_BUFFER, Some(self.zero_buffer)); - gl.bind_buffer(dst_target, Some(buffer)); + unsafe { gl.bind_buffer(glow::COPY_READ_BUFFER, Some(self.zero_buffer)) }; + unsafe { gl.bind_buffer(dst_target, Some(buffer)) }; let mut dst_offset = range.start; while dst_offset < range.end { let size = (range.end - dst_offset).min(super::ZERO_BUFFER_SIZE as u64); - gl.copy_buffer_sub_data( - glow::COPY_READ_BUFFER, - dst_target, - 0, - dst_offset as i32, - size as i32, - ); + unsafe { + gl.copy_buffer_sub_data( + glow::COPY_READ_BUFFER, + dst_target, + 0, + dst_offset as i32, + size as i32, + ) + }; dst_offset += size; } } else { - gl.bind_buffer(dst_target, Some(buffer)); + unsafe { gl.bind_buffer(dst_target, Some(buffer)) }; let zeroes = vec![0u8; (range.end - range.start) as usize]; - gl.buffer_sub_data_u8_slice(dst_target, range.start as i32, 
&zeroes); + unsafe { + gl.buffer_sub_data_u8_slice(dst_target, range.start as i32, &zeroes) + }; } } None => { @@ -289,49 +322,57 @@ impl super::Queue { let size = copy.size.get() as usize; match (src.raw, dst.raw) { (Some(ref src), Some(ref dst)) => { - gl.bind_buffer(copy_src_target, Some(*src)); - gl.bind_buffer(copy_dst_target, Some(*dst)); - gl.copy_buffer_sub_data( - copy_src_target, - copy_dst_target, - copy.src_offset as _, - copy.dst_offset as _, - copy.size.get() as _, - ); + unsafe { gl.bind_buffer(copy_src_target, Some(*src)) }; + unsafe { gl.bind_buffer(copy_dst_target, Some(*dst)) }; + unsafe { + gl.copy_buffer_sub_data( + copy_src_target, + copy_dst_target, + copy.src_offset as _, + copy.dst_offset as _, + copy.size.get() as _, + ) + }; } (Some(src), None) => { let mut data = dst.data.as_ref().unwrap().lock().unwrap(); let dst_data = &mut data.as_mut_slice() [copy.dst_offset as usize..copy.dst_offset as usize + size]; - gl.bind_buffer(copy_src_target, Some(src)); - self.shared.get_buffer_sub_data( - gl, - copy_src_target, - copy.src_offset as i32, - dst_data, - ); + unsafe { gl.bind_buffer(copy_src_target, Some(src)) }; + unsafe { + self.shared.get_buffer_sub_data( + gl, + copy_src_target, + copy.src_offset as i32, + dst_data, + ) + }; } (None, Some(dst)) => { let data = src.data.as_ref().unwrap().lock().unwrap(); let src_data = &data.as_slice() [copy.src_offset as usize..copy.src_offset as usize + size]; - gl.bind_buffer(copy_dst_target, Some(dst)); - gl.buffer_sub_data_u8_slice( - copy_dst_target, - copy.dst_offset as i32, - src_data, - ); + unsafe { gl.bind_buffer(copy_dst_target, Some(dst)) }; + unsafe { + gl.buffer_sub_data_u8_slice( + copy_dst_target, + copy.dst_offset as i32, + src_data, + ) + }; } (None, None) => { todo!() } } - gl.bind_buffer(copy_src_target, None); + unsafe { gl.bind_buffer(copy_src_target, None) }; if is_index_buffer_only_element_dst { - gl.bind_buffer(glow::ELEMENT_ARRAY_BUFFER, self.current_index_buffer); + unsafe { + 
gl.bind_buffer(glow::ELEMENT_ARRAY_BUFFER, self.current_index_buffer) + }; } else { - gl.bind_buffer(copy_dst_target, None); + unsafe { gl.bind_buffer(copy_dst_target, None) }; } } C::CopyTextureToTexture { @@ -343,61 +384,71 @@ impl super::Queue { ref copy, } => { //TODO: handle 3D copies - gl.bind_framebuffer(glow::READ_FRAMEBUFFER, Some(self.copy_fbo)); + unsafe { gl.bind_framebuffer(glow::READ_FRAMEBUFFER, Some(self.copy_fbo)) }; if is_layered_target(src_target) { //TODO: handle GLES without framebuffer_texture_3d - gl.framebuffer_texture_layer( - glow::READ_FRAMEBUFFER, - glow::COLOR_ATTACHMENT0, - Some(src), - copy.src_base.mip_level as i32, - copy.src_base.array_layer as i32, - ); + unsafe { + gl.framebuffer_texture_layer( + glow::READ_FRAMEBUFFER, + glow::COLOR_ATTACHMENT0, + Some(src), + copy.src_base.mip_level as i32, + copy.src_base.array_layer as i32, + ) + }; } else { - gl.framebuffer_texture_2d( - glow::READ_FRAMEBUFFER, - glow::COLOR_ATTACHMENT0, - src_target, - Some(src), - copy.src_base.mip_level as i32, - ); + unsafe { + gl.framebuffer_texture_2d( + glow::READ_FRAMEBUFFER, + glow::COLOR_ATTACHMENT0, + src_target, + Some(src), + copy.src_base.mip_level as i32, + ) + }; } - gl.bind_texture(dst_target, Some(dst)); + unsafe { gl.bind_texture(dst_target, Some(dst)) }; if dst_is_cubemap { - gl.copy_tex_sub_image_2d( - CUBEMAP_FACES[copy.dst_base.array_layer as usize], - copy.dst_base.mip_level as i32, - copy.dst_base.origin.x as i32, - copy.dst_base.origin.y as i32, - copy.src_base.origin.x as i32, - copy.src_base.origin.y as i32, - copy.size.width as i32, - copy.size.height as i32, - ); + unsafe { + gl.copy_tex_sub_image_2d( + CUBEMAP_FACES[copy.dst_base.array_layer as usize], + copy.dst_base.mip_level as i32, + copy.dst_base.origin.x as i32, + copy.dst_base.origin.y as i32, + copy.src_base.origin.x as i32, + copy.src_base.origin.y as i32, + copy.size.width as i32, + copy.size.height as i32, + ) + }; } else if is_layered_target(dst_target) { - 
gl.copy_tex_sub_image_3d( - dst_target, - copy.dst_base.mip_level as i32, - copy.dst_base.origin.x as i32, - copy.dst_base.origin.y as i32, - copy.dst_base.origin.z as i32, - copy.src_base.origin.x as i32, - copy.src_base.origin.y as i32, - copy.size.width as i32, - copy.size.height as i32, - ); + unsafe { + gl.copy_tex_sub_image_3d( + dst_target, + copy.dst_base.mip_level as i32, + copy.dst_base.origin.x as i32, + copy.dst_base.origin.y as i32, + copy.dst_base.origin.z as i32, + copy.src_base.origin.x as i32, + copy.src_base.origin.y as i32, + copy.size.width as i32, + copy.size.height as i32, + ) + }; } else { - gl.copy_tex_sub_image_2d( - dst_target, - copy.dst_base.mip_level as i32, - copy.dst_base.origin.x as i32, - copy.dst_base.origin.y as i32, - copy.src_base.origin.x as i32, - copy.src_base.origin.y as i32, - copy.size.width as i32, - copy.size.height as i32, - ); + unsafe { + gl.copy_tex_sub_image_2d( + dst_target, + copy.dst_base.mip_level as i32, + copy.dst_base.origin.x as i32, + copy.dst_base.origin.y as i32, + copy.src_base.origin.x as i32, + copy.src_base.origin.y as i32, + copy.size.width as i32, + copy.size.height as i32, + ) + }; } } C::CopyBufferToTexture { @@ -419,15 +470,15 @@ impl super::Queue { .rows_per_image .map_or(0, |rpi| format_info.block_dimensions.1 as u32 * rpi.get()); - gl.bind_texture(dst_target, Some(dst)); - gl.pixel_store_i32(glow::UNPACK_ROW_LENGTH, row_texels as i32); - gl.pixel_store_i32(glow::UNPACK_IMAGE_HEIGHT, column_texels as i32); + unsafe { gl.bind_texture(dst_target, Some(dst)) }; + unsafe { gl.pixel_store_i32(glow::UNPACK_ROW_LENGTH, row_texels as i32) }; + unsafe { gl.pixel_store_i32(glow::UNPACK_IMAGE_HEIGHT, column_texels as i32) }; let mut unbind_unpack_buffer = false; if !format_info.is_compressed() { let buffer_data; let unpack_data = match src.raw { Some(buffer) => { - gl.bind_buffer(glow::PIXEL_UNPACK_BUFFER, Some(buffer)); + unsafe { gl.bind_buffer(glow::PIXEL_UNPACK_BUFFER, Some(buffer)) }; 
unbind_unpack_buffer = true; glow::PixelUnpackData::BufferOffset(copy.buffer_layout.offset as u32) } @@ -440,76 +491,86 @@ impl super::Queue { }; match dst_target { glow::TEXTURE_3D => { - gl.tex_sub_image_3d( - dst_target, - copy.texture_base.mip_level as i32, - copy.texture_base.origin.x as i32, - copy.texture_base.origin.y as i32, - copy.texture_base.origin.z as i32, - copy.size.width as i32, - copy.size.height as i32, - copy.size.depth as i32, - format_desc.external, - format_desc.data_type, - unpack_data, - ); + unsafe { + gl.tex_sub_image_3d( + dst_target, + copy.texture_base.mip_level as i32, + copy.texture_base.origin.x as i32, + copy.texture_base.origin.y as i32, + copy.texture_base.origin.z as i32, + copy.size.width as i32, + copy.size.height as i32, + copy.size.depth as i32, + format_desc.external, + format_desc.data_type, + unpack_data, + ) + }; } glow::TEXTURE_2D_ARRAY => { - gl.tex_sub_image_3d( - dst_target, - copy.texture_base.mip_level as i32, - copy.texture_base.origin.x as i32, - copy.texture_base.origin.y as i32, - copy.texture_base.array_layer as i32, - copy.size.width as i32, - copy.size.height as i32, - copy.size.depth as i32, - format_desc.external, - format_desc.data_type, - unpack_data, - ); + unsafe { + gl.tex_sub_image_3d( + dst_target, + copy.texture_base.mip_level as i32, + copy.texture_base.origin.x as i32, + copy.texture_base.origin.y as i32, + copy.texture_base.array_layer as i32, + copy.size.width as i32, + copy.size.height as i32, + copy.size.depth as i32, + format_desc.external, + format_desc.data_type, + unpack_data, + ) + }; } glow::TEXTURE_2D => { - gl.tex_sub_image_2d( - dst_target, - copy.texture_base.mip_level as i32, - copy.texture_base.origin.x as i32, - copy.texture_base.origin.y as i32, - copy.size.width as i32, - copy.size.height as i32, - format_desc.external, - format_desc.data_type, - unpack_data, - ); + unsafe { + gl.tex_sub_image_2d( + dst_target, + copy.texture_base.mip_level as i32, + copy.texture_base.origin.x 
as i32, + copy.texture_base.origin.y as i32, + copy.size.width as i32, + copy.size.height as i32, + format_desc.external, + format_desc.data_type, + unpack_data, + ) + }; } glow::TEXTURE_CUBE_MAP => { - gl.tex_sub_image_2d( - CUBEMAP_FACES[copy.texture_base.array_layer as usize], - copy.texture_base.mip_level as i32, - copy.texture_base.origin.x as i32, - copy.texture_base.origin.y as i32, - copy.size.width as i32, - copy.size.height as i32, - format_desc.external, - format_desc.data_type, - unpack_data, - ); + unsafe { + gl.tex_sub_image_2d( + CUBEMAP_FACES[copy.texture_base.array_layer as usize], + copy.texture_base.mip_level as i32, + copy.texture_base.origin.x as i32, + copy.texture_base.origin.y as i32, + copy.size.width as i32, + copy.size.height as i32, + format_desc.external, + format_desc.data_type, + unpack_data, + ) + }; } glow::TEXTURE_CUBE_MAP_ARRAY => { //Note: not sure if this is correct! - gl.tex_sub_image_3d( - dst_target, - copy.texture_base.mip_level as i32, - copy.texture_base.origin.x as i32, - copy.texture_base.origin.y as i32, - copy.texture_base.origin.z as i32, - copy.size.width as i32, - copy.size.height as i32, - copy.size.depth as i32, - format_desc.external, - format_desc.data_type, - unpack_data, - ); + unsafe { + gl.tex_sub_image_3d( + dst_target, + copy.texture_base.mip_level as i32, + copy.texture_base.origin.x as i32, + copy.texture_base.origin.y as i32, + copy.texture_base.origin.z as i32, + copy.size.width as i32, + copy.size.height as i32, + copy.size.depth as i32, + format_desc.external, + format_desc.data_type, + unpack_data, + ) + }; } _ => unreachable!(), } @@ -537,7 +598,7 @@ impl super::Queue { let buffer_data; let unpack_data = match src.raw { Some(buffer) => { - gl.bind_buffer(glow::PIXEL_UNPACK_BUFFER, Some(buffer)); + unsafe { gl.bind_buffer(glow::PIXEL_UNPACK_BUFFER, Some(buffer)) }; unbind_unpack_buffer = true; glow::CompressedPixelUnpackData::BufferRange( offset..offset + bytes_in_upload, @@ -555,48 +616,54 @@ impl 
super::Queue { glow::TEXTURE_3D | glow::TEXTURE_CUBE_MAP_ARRAY | glow::TEXTURE_2D_ARRAY => { - gl.compressed_tex_sub_image_3d( - dst_target, - copy.texture_base.mip_level as i32, - copy.texture_base.origin.x as i32, - copy.texture_base.origin.y as i32, - copy.texture_base.origin.z as i32, - copy.size.width as i32, - copy.size.height as i32, - copy.size.depth as i32, - format_desc.internal, - unpack_data, - ); + unsafe { + gl.compressed_tex_sub_image_3d( + dst_target, + copy.texture_base.mip_level as i32, + copy.texture_base.origin.x as i32, + copy.texture_base.origin.y as i32, + copy.texture_base.origin.z as i32, + copy.size.width as i32, + copy.size.height as i32, + copy.size.depth as i32, + format_desc.internal, + unpack_data, + ) + }; } glow::TEXTURE_2D => { - gl.compressed_tex_sub_image_2d( - dst_target, - copy.texture_base.mip_level as i32, - copy.texture_base.origin.x as i32, - copy.texture_base.origin.y as i32, - copy.size.width as i32, - copy.size.height as i32, - format_desc.internal, - unpack_data, - ); + unsafe { + gl.compressed_tex_sub_image_2d( + dst_target, + copy.texture_base.mip_level as i32, + copy.texture_base.origin.x as i32, + copy.texture_base.origin.y as i32, + copy.size.width as i32, + copy.size.height as i32, + format_desc.internal, + unpack_data, + ) + }; } glow::TEXTURE_CUBE_MAP => { - gl.compressed_tex_sub_image_2d( - CUBEMAP_FACES[copy.texture_base.array_layer as usize], - copy.texture_base.mip_level as i32, - copy.texture_base.origin.x as i32, - copy.texture_base.origin.y as i32, - copy.size.width as i32, - copy.size.height as i32, - format_desc.internal, - unpack_data, - ); + unsafe { + gl.compressed_tex_sub_image_2d( + CUBEMAP_FACES[copy.texture_base.array_layer as usize], + copy.texture_base.mip_level as i32, + copy.texture_base.origin.x as i32, + copy.texture_base.origin.y as i32, + copy.size.width as i32, + copy.size.height as i32, + format_desc.internal, + unpack_data, + ) + }; } _ => unreachable!(), } } if unbind_unpack_buffer { 
- gl.bind_buffer(glow::PIXEL_UNPACK_BUFFER, None); + unsafe { gl.bind_buffer(glow::PIXEL_UNPACK_BUFFER, None) }; } } C::CopyTextureToBuffer { @@ -626,31 +693,35 @@ impl super::Queue { bpr.get() / format_info.block_size as u32 }); - gl.bind_framebuffer(glow::READ_FRAMEBUFFER, Some(self.copy_fbo)); + unsafe { gl.bind_framebuffer(glow::READ_FRAMEBUFFER, Some(self.copy_fbo)) }; //TODO: handle cubemap copies if is_layered_target(src_target) { //TODO: handle GLES without framebuffer_texture_3d - gl.framebuffer_texture_layer( - glow::READ_FRAMEBUFFER, - glow::COLOR_ATTACHMENT0, - Some(src), - copy.texture_base.mip_level as i32, - copy.texture_base.array_layer as i32, - ); + unsafe { + gl.framebuffer_texture_layer( + glow::READ_FRAMEBUFFER, + glow::COLOR_ATTACHMENT0, + Some(src), + copy.texture_base.mip_level as i32, + copy.texture_base.array_layer as i32, + ) + }; } else { - gl.framebuffer_texture_2d( - glow::READ_FRAMEBUFFER, - glow::COLOR_ATTACHMENT0, - src_target, - Some(src), - copy.texture_base.mip_level as i32, - ); + unsafe { + gl.framebuffer_texture_2d( + glow::READ_FRAMEBUFFER, + glow::COLOR_ATTACHMENT0, + src_target, + Some(src), + copy.texture_base.mip_level as i32, + ) + }; } let mut buffer_data; let unpack_data = match dst.raw { Some(buffer) => { - gl.pixel_store_i32(glow::PACK_ROW_LENGTH, row_texels as i32); - gl.bind_buffer(glow::PIXEL_PACK_BUFFER, Some(buffer)); + unsafe { gl.pixel_store_i32(glow::PACK_ROW_LENGTH, row_texels as i32) }; + unsafe { gl.bind_buffer(glow::PIXEL_PACK_BUFFER, Some(buffer)) }; glow::PixelPackData::BufferOffset(copy.buffer_layout.offset as u32) } None => { @@ -660,25 +731,27 @@ impl super::Queue { glow::PixelPackData::Slice(dst_data) } }; - gl.read_pixels( - copy.texture_base.origin.x as i32, - copy.texture_base.origin.y as i32, - copy.size.width as i32, - copy.size.height as i32, - format_desc.external, - format_desc.data_type, - unpack_data, - ); + unsafe { + gl.read_pixels( + copy.texture_base.origin.x as i32, + 
copy.texture_base.origin.y as i32, + copy.size.width as i32, + copy.size.height as i32, + format_desc.external, + format_desc.data_type, + unpack_data, + ) + }; } C::SetIndexBuffer(buffer) => { - gl.bind_buffer(glow::ELEMENT_ARRAY_BUFFER, Some(buffer)); + unsafe { gl.bind_buffer(glow::ELEMENT_ARRAY_BUFFER, Some(buffer)) }; self.current_index_buffer = Some(buffer); } C::BeginQuery(query, target) => { - gl.begin_query(target, query); + unsafe { gl.begin_query(target, query) }; } C::EndQuery(target) => { - gl.end_query(target); + unsafe { gl.end_query(target) }; } C::CopyQueryResults { ref query_range, @@ -688,17 +761,21 @@ impl super::Queue { } => { self.temp_query_results.clear(); for &query in queries[query_range.start as usize..query_range.end as usize].iter() { - let result = gl.get_query_parameter_u32(query, glow::QUERY_RESULT); + let result = unsafe { gl.get_query_parameter_u32(query, glow::QUERY_RESULT) }; self.temp_query_results.push(result as u64); } - let query_data = slice::from_raw_parts( - self.temp_query_results.as_ptr() as *const u8, - self.temp_query_results.len() * mem::size_of::(), - ); + let query_data = unsafe { + slice::from_raw_parts( + self.temp_query_results.as_ptr() as *const u8, + self.temp_query_results.len() * mem::size_of::(), + ) + }; match dst.raw { Some(buffer) => { - gl.bind_buffer(dst_target, Some(buffer)); - gl.buffer_sub_data_u8_slice(dst_target, dst_offset as i32, query_data); + unsafe { gl.bind_buffer(dst_target, Some(buffer)) }; + unsafe { + gl.buffer_sub_data_u8_slice(dst_target, dst_offset as i32, query_data) + }; } None => { let data = &mut dst.data.as_ref().unwrap().lock().unwrap(); @@ -709,73 +786,81 @@ impl super::Queue { } C::ResetFramebuffer { is_default } => { if is_default { - gl.bind_framebuffer(glow::DRAW_FRAMEBUFFER, None); + unsafe { gl.bind_framebuffer(glow::DRAW_FRAMEBUFFER, None) }; } else { - gl.bind_framebuffer(glow::DRAW_FRAMEBUFFER, Some(self.draw_fbo)); - gl.framebuffer_texture_2d( - glow::DRAW_FRAMEBUFFER, 
- glow::DEPTH_STENCIL_ATTACHMENT, - glow::TEXTURE_2D, - None, - 0, - ); - for i in 0..crate::MAX_COLOR_ATTACHMENTS { - let target = glow::COLOR_ATTACHMENT0 + i as u32; + unsafe { gl.bind_framebuffer(glow::DRAW_FRAMEBUFFER, Some(self.draw_fbo)) }; + unsafe { gl.framebuffer_texture_2d( glow::DRAW_FRAMEBUFFER, - target, + glow::DEPTH_STENCIL_ATTACHMENT, glow::TEXTURE_2D, None, 0, - ); + ) + }; + for i in 0..crate::MAX_COLOR_ATTACHMENTS { + let target = glow::COLOR_ATTACHMENT0 + i as u32; + unsafe { + gl.framebuffer_texture_2d( + glow::DRAW_FRAMEBUFFER, + target, + glow::TEXTURE_2D, + None, + 0, + ) + }; } } - gl.color_mask(true, true, true, true); - gl.depth_mask(true); - gl.stencil_mask(!0); - gl.disable(glow::DEPTH_TEST); - gl.disable(glow::STENCIL_TEST); - gl.disable(glow::SCISSOR_TEST); + unsafe { gl.color_mask(true, true, true, true) }; + unsafe { gl.depth_mask(true) }; + unsafe { gl.stencil_mask(!0) }; + unsafe { gl.disable(glow::DEPTH_TEST) }; + unsafe { gl.disable(glow::STENCIL_TEST) }; + unsafe { gl.disable(glow::SCISSOR_TEST) }; } C::BindAttachment { attachment, ref view, } => { - self.set_attachment(gl, glow::DRAW_FRAMEBUFFER, attachment, view); + unsafe { self.set_attachment(gl, glow::DRAW_FRAMEBUFFER, attachment, view) }; } C::ResolveAttachment { attachment, ref dst, ref size, } => { - gl.bind_framebuffer(glow::READ_FRAMEBUFFER, Some(self.draw_fbo)); - gl.read_buffer(attachment); - gl.bind_framebuffer(glow::DRAW_FRAMEBUFFER, Some(self.copy_fbo)); - self.set_attachment(gl, glow::DRAW_FRAMEBUFFER, glow::COLOR_ATTACHMENT0, dst); - gl.blit_framebuffer( - 0, - 0, - size.width as i32, - size.height as i32, - 0, - 0, - size.width as i32, - size.height as i32, - glow::COLOR_BUFFER_BIT, - glow::NEAREST, - ); - gl.bind_framebuffer(glow::READ_FRAMEBUFFER, None); - gl.bind_framebuffer(glow::DRAW_FRAMEBUFFER, Some(self.draw_fbo)); + unsafe { gl.bind_framebuffer(glow::READ_FRAMEBUFFER, Some(self.draw_fbo)) }; + unsafe { gl.read_buffer(attachment) }; + unsafe { 
gl.bind_framebuffer(glow::DRAW_FRAMEBUFFER, Some(self.copy_fbo)) }; + unsafe { + self.set_attachment(gl, glow::DRAW_FRAMEBUFFER, glow::COLOR_ATTACHMENT0, dst) + }; + unsafe { + gl.blit_framebuffer( + 0, + 0, + size.width as i32, + size.height as i32, + 0, + 0, + size.width as i32, + size.height as i32, + glow::COLOR_BUFFER_BIT, + glow::NEAREST, + ) + }; + unsafe { gl.bind_framebuffer(glow::READ_FRAMEBUFFER, None) }; + unsafe { gl.bind_framebuffer(glow::DRAW_FRAMEBUFFER, Some(self.draw_fbo)) }; } C::InvalidateAttachments(ref list) => { - gl.invalidate_framebuffer(glow::DRAW_FRAMEBUFFER, list); + unsafe { gl.invalidate_framebuffer(glow::DRAW_FRAMEBUFFER, list) }; } C::SetDrawColorBuffers(count) => { self.draw_buffer_count = count; let indices = (0..count as u32) .map(|i| glow::COLOR_ATTACHMENT0 + i) .collect::>(); - gl.draw_buffers(&indices); + unsafe { gl.draw_buffers(&indices) }; if self .shared @@ -783,7 +868,7 @@ impl super::Queue { .contains(super::PrivateCapabilities::CAN_DISABLE_DRAW_BUFFER) { for draw_buffer in 0..count as u32 { - gl.disable_draw_buffer(glow::BLEND, draw_buffer); + unsafe { gl.disable_draw_buffer(glow::BLEND, draw_buffer) }; } } } @@ -798,51 +883,58 @@ impl super::Queue { .contains(super::Workarounds::MESA_I915_SRGB_SHADER_CLEAR) && is_srgb { - self.perform_shader_clear(gl, draw_buffer, *color); + unsafe { self.perform_shader_clear(gl, draw_buffer, *color) }; } else { - gl.clear_buffer_f32_slice(glow::COLOR, draw_buffer, color); + unsafe { gl.clear_buffer_f32_slice(glow::COLOR, draw_buffer, color) }; } } C::ClearColorU(draw_buffer, ref color) => { - gl.clear_buffer_u32_slice(glow::COLOR, draw_buffer, color); + unsafe { gl.clear_buffer_u32_slice(glow::COLOR, draw_buffer, color) }; } C::ClearColorI(draw_buffer, ref color) => { - gl.clear_buffer_i32_slice(glow::COLOR, draw_buffer, color); + unsafe { gl.clear_buffer_i32_slice(glow::COLOR, draw_buffer, color) }; } C::ClearDepth(depth) => { - gl.clear_buffer_f32_slice(glow::DEPTH, 0, &[depth]); + 
unsafe { gl.clear_buffer_f32_slice(glow::DEPTH, 0, &[depth]) }; } C::ClearStencil(value) => { - gl.clear_buffer_i32_slice(glow::STENCIL, 0, &[value as i32]); + unsafe { gl.clear_buffer_i32_slice(glow::STENCIL, 0, &[value as i32]) }; } C::ClearDepthAndStencil(depth, stencil_value) => { - gl.clear_buffer_depth_stencil(glow::DEPTH_STENCIL, 0, depth, stencil_value as i32); + unsafe { + gl.clear_buffer_depth_stencil( + glow::DEPTH_STENCIL, + 0, + depth, + stencil_value as i32, + ) + }; } C::BufferBarrier(raw, usage) => { let mut flags = 0; if usage.contains(crate::BufferUses::VERTEX) { flags |= glow::VERTEX_ATTRIB_ARRAY_BARRIER_BIT; - gl.bind_buffer(glow::ARRAY_BUFFER, Some(raw)); - gl.vertex_attrib_pointer_f32(0, 1, glow::BYTE, true, 0, 0); + unsafe { gl.bind_buffer(glow::ARRAY_BUFFER, Some(raw)) }; + unsafe { gl.vertex_attrib_pointer_f32(0, 1, glow::BYTE, true, 0, 0) }; } if usage.contains(crate::BufferUses::INDEX) { flags |= glow::ELEMENT_ARRAY_BARRIER_BIT; - gl.bind_buffer(glow::ELEMENT_ARRAY_BUFFER, Some(raw)); + unsafe { gl.bind_buffer(glow::ELEMENT_ARRAY_BUFFER, Some(raw)) }; } if usage.contains(crate::BufferUses::UNIFORM) { flags |= glow::UNIFORM_BARRIER_BIT; } if usage.contains(crate::BufferUses::INDIRECT) { flags |= glow::COMMAND_BARRIER_BIT; - gl.bind_buffer(glow::DRAW_INDIRECT_BUFFER, Some(raw)); + unsafe { gl.bind_buffer(glow::DRAW_INDIRECT_BUFFER, Some(raw)) }; } if usage.contains(crate::BufferUses::COPY_SRC) { flags |= glow::PIXEL_BUFFER_BARRIER_BIT; - gl.bind_buffer(glow::PIXEL_UNPACK_BUFFER, Some(raw)); + unsafe { gl.bind_buffer(glow::PIXEL_UNPACK_BUFFER, Some(raw)) }; } if usage.contains(crate::BufferUses::COPY_DST) { flags |= glow::PIXEL_BUFFER_BARRIER_BIT; - gl.bind_buffer(glow::PIXEL_PACK_BUFFER, Some(raw)); + unsafe { gl.bind_buffer(glow::PIXEL_PACK_BUFFER, Some(raw)) }; } if usage.intersects(crate::BufferUses::MAP_READ | crate::BufferUses::MAP_WRITE) { flags |= glow::BUFFER_UPDATE_BARRIER_BIT; @@ -852,7 +944,7 @@ impl super::Queue { ) { flags |= 
glow::SHADER_STORAGE_BARRIER_BIT; } - gl.memory_barrier(flags); + unsafe { gl.memory_barrier(flags) }; } C::TextureBarrier(usage) => { let mut flags = 0; @@ -874,18 +966,18 @@ impl super::Queue { ) { flags |= glow::FRAMEBUFFER_BARRIER_BIT; } - gl.memory_barrier(flags); + unsafe { gl.memory_barrier(flags) }; } C::SetViewport { ref rect, ref depth, } => { - gl.viewport(rect.x, rect.y, rect.w, rect.h); - gl.depth_range_f32(depth.start, depth.end); + unsafe { gl.viewport(rect.x, rect.y, rect.w, rect.h) }; + unsafe { gl.depth_range_f32(depth.start, depth.end) }; } C::SetScissor(ref rect) => { - gl.scissor(rect.x, rect.y, rect.w, rect.h); - gl.enable(glow::SCISSOR_TEST); + unsafe { gl.scissor(rect.x, rect.y, rect.w, rect.h) }; + unsafe { gl.enable(glow::SCISSOR_TEST) }; } C::SetStencilFunc { face, @@ -893,134 +985,144 @@ impl super::Queue { reference, read_mask, } => { - gl.stencil_func_separate(face, function, reference as i32, read_mask); + unsafe { gl.stencil_func_separate(face, function, reference as i32, read_mask) }; } C::SetStencilOps { face, write_mask, ref ops, } => { - gl.stencil_mask_separate(face, write_mask); - gl.stencil_op_separate(face, ops.fail, ops.depth_fail, ops.pass); + unsafe { gl.stencil_mask_separate(face, write_mask) }; + unsafe { gl.stencil_op_separate(face, ops.fail, ops.depth_fail, ops.pass) }; } C::SetVertexAttribute { buffer, ref buffer_desc, attribute_desc: ref vat, } => { - gl.bind_buffer(glow::ARRAY_BUFFER, buffer); - gl.enable_vertex_attrib_array(vat.location); + unsafe { gl.bind_buffer(glow::ARRAY_BUFFER, buffer) }; + unsafe { gl.enable_vertex_attrib_array(vat.location) }; if buffer.is_none() { match vat.format_desc.attrib_kind { - super::VertexAttribKind::Float => gl.vertex_attrib_format_f32( - vat.location, - vat.format_desc.element_count, - vat.format_desc.element_format, - true, // always normalized - vat.offset, - ), - super::VertexAttribKind::Integer => gl.vertex_attrib_format_i32( - vat.location, - vat.format_desc.element_count, 
- vat.format_desc.element_format, - vat.offset, - ), + super::VertexAttribKind::Float => unsafe { + gl.vertex_attrib_format_f32( + vat.location, + vat.format_desc.element_count, + vat.format_desc.element_format, + true, // always normalized + vat.offset, + ) + }, + super::VertexAttribKind::Integer => unsafe { + gl.vertex_attrib_format_i32( + vat.location, + vat.format_desc.element_count, + vat.format_desc.element_format, + vat.offset, + ) + }, } //Note: there is apparently a bug on AMD 3500U: // this call is ignored if the current array is disabled. - gl.vertex_attrib_binding(vat.location, vat.buffer_index); + unsafe { gl.vertex_attrib_binding(vat.location, vat.buffer_index) }; } else { match vat.format_desc.attrib_kind { - super::VertexAttribKind::Float => gl.vertex_attrib_pointer_f32( - vat.location, - vat.format_desc.element_count, - vat.format_desc.element_format, - true, // always normalized - buffer_desc.stride as i32, - vat.offset as i32, - ), - super::VertexAttribKind::Integer => gl.vertex_attrib_pointer_i32( - vat.location, - vat.format_desc.element_count, - vat.format_desc.element_format, - buffer_desc.stride as i32, - vat.offset as i32, - ), + super::VertexAttribKind::Float => unsafe { + gl.vertex_attrib_pointer_f32( + vat.location, + vat.format_desc.element_count, + vat.format_desc.element_format, + true, // always normalized + buffer_desc.stride as i32, + vat.offset as i32, + ) + }, + super::VertexAttribKind::Integer => unsafe { + gl.vertex_attrib_pointer_i32( + vat.location, + vat.format_desc.element_count, + vat.format_desc.element_format, + buffer_desc.stride as i32, + vat.offset as i32, + ) + }, } - gl.vertex_attrib_divisor(vat.location, buffer_desc.step as u32); + unsafe { gl.vertex_attrib_divisor(vat.location, buffer_desc.step as u32) }; } } C::UnsetVertexAttribute(location) => { - gl.disable_vertex_attrib_array(location); + unsafe { gl.disable_vertex_attrib_array(location) }; } C::SetVertexBuffer { index, ref buffer, ref buffer_desc, } => { - 
gl.vertex_binding_divisor(index, buffer_desc.step as u32); - gl.bind_vertex_buffer( - index, - Some(buffer.raw), - buffer.offset as i32, - buffer_desc.stride as i32, - ); + unsafe { gl.vertex_binding_divisor(index, buffer_desc.step as u32) }; + unsafe { + gl.bind_vertex_buffer( + index, + Some(buffer.raw), + buffer.offset as i32, + buffer_desc.stride as i32, + ) + }; } C::SetDepth(ref depth) => { - gl.depth_func(depth.function); - gl.depth_mask(depth.mask); + unsafe { gl.depth_func(depth.function) }; + unsafe { gl.depth_mask(depth.mask) }; } C::SetDepthBias(bias) => { if bias.is_enabled() { - gl.enable(glow::POLYGON_OFFSET_FILL); - gl.polygon_offset(bias.constant as f32, bias.slope_scale); + unsafe { gl.enable(glow::POLYGON_OFFSET_FILL) }; + unsafe { gl.polygon_offset(bias.constant as f32, bias.slope_scale) }; } else { - gl.disable(glow::POLYGON_OFFSET_FILL); + unsafe { gl.disable(glow::POLYGON_OFFSET_FILL) }; } } C::ConfigureDepthStencil(aspects) => { if aspects.contains(crate::FormatAspects::DEPTH) { - gl.enable(glow::DEPTH_TEST); + unsafe { gl.enable(glow::DEPTH_TEST) }; } else { - gl.disable(glow::DEPTH_TEST); + unsafe { gl.disable(glow::DEPTH_TEST) }; } if aspects.contains(crate::FormatAspects::STENCIL) { - gl.enable(glow::STENCIL_TEST); + unsafe { gl.enable(glow::STENCIL_TEST) }; } else { - gl.disable(glow::STENCIL_TEST); + unsafe { gl.disable(glow::STENCIL_TEST) }; } } C::SetAlphaToCoverage(enabled) => { if enabled { - gl.enable(glow::SAMPLE_ALPHA_TO_COVERAGE); + unsafe { gl.enable(glow::SAMPLE_ALPHA_TO_COVERAGE) }; } else { - gl.disable(glow::SAMPLE_ALPHA_TO_COVERAGE); + unsafe { gl.disable(glow::SAMPLE_ALPHA_TO_COVERAGE) }; } } C::SetProgram(program) => { - gl.use_program(Some(program)); + unsafe { gl.use_program(Some(program)) }; } C::SetPrimitive(ref state) => { - gl.front_face(state.front_face); + unsafe { gl.front_face(state.front_face) }; if state.cull_face != 0 { - gl.enable(glow::CULL_FACE); - gl.cull_face(state.cull_face); + unsafe { 
gl.enable(glow::CULL_FACE) }; + unsafe { gl.cull_face(state.cull_face) }; } else { - gl.disable(glow::CULL_FACE); + unsafe { gl.disable(glow::CULL_FACE) }; } if self.features.contains(wgt::Features::DEPTH_CLIP_CONTROL) { //Note: this is a bit tricky, since we are controlling the clip, not the clamp. if state.unclipped_depth { - gl.enable(glow::DEPTH_CLAMP); + unsafe { gl.enable(glow::DEPTH_CLAMP) }; } else { - gl.disable(glow::DEPTH_CLAMP); + unsafe { gl.disable(glow::DEPTH_CLAMP) }; } } } C::SetBlendConstant(c) => { - gl.blend_color(c[0], c[1], c[2], c[3]); + unsafe { gl.blend_color(c[0], c[1], c[2], c[3]) }; } C::SetColorTarget { draw_buffer_index, @@ -1028,62 +1130,79 @@ impl super::Queue { } => { use wgt::ColorWrites as Cw; if let Some(index) = draw_buffer_index { - gl.color_mask_draw_buffer( - index, - mask.contains(Cw::RED), - mask.contains(Cw::GREEN), - mask.contains(Cw::BLUE), - mask.contains(Cw::ALPHA), - ); + unsafe { + gl.color_mask_draw_buffer( + index, + mask.contains(Cw::RED), + mask.contains(Cw::GREEN), + mask.contains(Cw::BLUE), + mask.contains(Cw::ALPHA), + ) + }; if let Some(ref blend) = *blend { - gl.enable_draw_buffer(index, glow::BLEND); + unsafe { gl.enable_draw_buffer(index, glow::BLEND) }; if blend.color != blend.alpha { - gl.blend_equation_separate_draw_buffer( - index, - blend.color.equation, - blend.alpha.equation, - ); - gl.blend_func_separate_draw_buffer( - index, - blend.color.src, - blend.color.dst, - blend.alpha.src, - blend.alpha.dst, - ); + unsafe { + gl.blend_equation_separate_draw_buffer( + index, + blend.color.equation, + blend.alpha.equation, + ) + }; + unsafe { + gl.blend_func_separate_draw_buffer( + index, + blend.color.src, + blend.color.dst, + blend.alpha.src, + blend.alpha.dst, + ) + }; } else { - gl.blend_equation_draw_buffer(index, blend.color.equation); - gl.blend_func_draw_buffer(index, blend.color.src, blend.color.dst); + unsafe { gl.blend_equation_draw_buffer(index, blend.color.equation) }; + unsafe { + 
gl.blend_func_draw_buffer(index, blend.color.src, blend.color.dst) + }; } } else if self .shared .private_caps .contains(super::PrivateCapabilities::CAN_DISABLE_DRAW_BUFFER) { - gl.disable_draw_buffer(index, glow::BLEND); + unsafe { gl.disable_draw_buffer(index, glow::BLEND) }; } } else { - gl.color_mask( - mask.contains(Cw::RED), - mask.contains(Cw::GREEN), - mask.contains(Cw::BLUE), - mask.contains(Cw::ALPHA), - ); + unsafe { + gl.color_mask( + mask.contains(Cw::RED), + mask.contains(Cw::GREEN), + mask.contains(Cw::BLUE), + mask.contains(Cw::ALPHA), + ) + }; if let Some(ref blend) = *blend { - gl.enable(glow::BLEND); + unsafe { gl.enable(glow::BLEND) }; if blend.color != blend.alpha { - gl.blend_equation_separate(blend.color.equation, blend.alpha.equation); - gl.blend_func_separate( - blend.color.src, - blend.color.dst, - blend.alpha.src, - blend.alpha.dst, - ); + unsafe { + gl.blend_equation_separate( + blend.color.equation, + blend.alpha.equation, + ) + }; + unsafe { + gl.blend_func_separate( + blend.color.src, + blend.color.dst, + blend.alpha.src, + blend.alpha.dst, + ) + }; } else { - gl.blend_equation(blend.color.equation); - gl.blend_func(blend.color.src, blend.color.dst); + unsafe { gl.blend_equation(blend.color.equation) }; + unsafe { gl.blend_func(blend.color.src, blend.color.dst) }; } } else { - gl.disable(glow::BLEND); + unsafe { gl.disable(glow::BLEND) }; } } } @@ -1094,40 +1213,44 @@ impl super::Queue { offset, size, } => { - gl.bind_buffer_range(target, slot, Some(buffer), offset, size); + unsafe { gl.bind_buffer_range(target, slot, Some(buffer), offset, size) }; } C::BindSampler(texture_index, sampler) => { - gl.bind_sampler(texture_index, sampler); + unsafe { gl.bind_sampler(texture_index, sampler) }; } C::BindTexture { slot, texture, target, } => { - gl.active_texture(glow::TEXTURE0 + slot); - gl.bind_texture(target, Some(texture)); + unsafe { gl.active_texture(glow::TEXTURE0 + slot) }; + unsafe { gl.bind_texture(target, Some(texture)) }; } 
C::BindImage { slot, ref binding } => { - gl.bind_image_texture( - slot, - binding.raw, - binding.mip_level as i32, - binding.array_layer.is_none(), - binding.array_layer.unwrap_or_default() as i32, - binding.access, - binding.format, - ); + unsafe { + gl.bind_image_texture( + slot, + binding.raw, + binding.mip_level as i32, + binding.array_layer.is_none(), + binding.array_layer.unwrap_or_default() as i32, + binding.access, + binding.format, + ) + }; } #[cfg(not(target_arch = "wasm32"))] C::InsertDebugMarker(ref range) => { let marker = extract_marker(data_bytes, range); - gl.debug_message_insert( - glow::DEBUG_SOURCE_APPLICATION, - glow::DEBUG_TYPE_MARKER, - DEBUG_ID, - glow::DEBUG_SEVERITY_NOTIFICATION, - marker, - ); + unsafe { + gl.debug_message_insert( + glow::DEBUG_SOURCE_APPLICATION, + glow::DEBUG_TYPE_MARKER, + DEBUG_ID, + glow::DEBUG_SEVERITY_NOTIFICATION, + marker, + ) + }; } #[cfg(target_arch = "wasm32")] C::InsertDebugMarker(_) => (), @@ -1136,11 +1259,15 @@ impl super::Queue { #[cfg(not(target_arch = "wasm32"))] let marker = extract_marker(data_bytes, range); #[cfg(not(target_arch = "wasm32"))] - gl.push_debug_group(glow::DEBUG_SOURCE_APPLICATION, DEBUG_ID, marker); + unsafe { + gl.push_debug_group(glow::DEBUG_SOURCE_APPLICATION, DEBUG_ID, marker) + }; } C::PopDebugGroup => { #[cfg(not(target_arch = "wasm32"))] - gl.pop_debug_group(); + unsafe { + gl.pop_debug_group() + }; } C::SetPushConstants { ref uniform, @@ -1161,47 +1288,47 @@ impl super::Queue { match uniform.utype { glow::FLOAT => { let data = get_data::(data_bytes, offset)[0]; - gl.uniform_1_f32(location, data); + unsafe { gl.uniform_1_f32(location, data) }; } glow::FLOAT_VEC2 => { let data = get_data::<[f32; 2]>(data_bytes, offset)[0]; - gl.uniform_2_f32_slice(location, &data); + unsafe { gl.uniform_2_f32_slice(location, &data) }; } glow::FLOAT_VEC3 => { let data = get_data::<[f32; 3]>(data_bytes, offset)[0]; - gl.uniform_3_f32_slice(location, &data); + unsafe { 
gl.uniform_3_f32_slice(location, &data) }; } glow::FLOAT_VEC4 => { let data = get_data::<[f32; 4]>(data_bytes, offset)[0]; - gl.uniform_4_f32_slice(location, &data); + unsafe { gl.uniform_4_f32_slice(location, &data) }; } glow::INT => { let data = get_data::(data_bytes, offset)[0]; - gl.uniform_1_i32(location, data); + unsafe { gl.uniform_1_i32(location, data) }; } glow::INT_VEC2 => { let data = get_data::<[i32; 2]>(data_bytes, offset)[0]; - gl.uniform_2_i32_slice(location, &data); + unsafe { gl.uniform_2_i32_slice(location, &data) }; } glow::INT_VEC3 => { let data = get_data::<[i32; 3]>(data_bytes, offset)[0]; - gl.uniform_3_i32_slice(location, &data); + unsafe { gl.uniform_3_i32_slice(location, &data) }; } glow::INT_VEC4 => { let data = get_data::<[i32; 4]>(data_bytes, offset)[0]; - gl.uniform_4_i32_slice(location, &data); + unsafe { gl.uniform_4_i32_slice(location, &data) }; } glow::FLOAT_MAT2 => { let data = get_data::<[f32; 4]>(data_bytes, offset)[0]; - gl.uniform_matrix_2_f32_slice(location, false, &data); + unsafe { gl.uniform_matrix_2_f32_slice(location, false, &data) }; } glow::FLOAT_MAT3 => { let data = get_data::<[f32; 9]>(data_bytes, offset)[0]; - gl.uniform_matrix_3_f32_slice(location, false, &data); + unsafe { gl.uniform_matrix_3_f32_slice(location, false, &data) }; } glow::FLOAT_MAT4 => { let data = get_data::<[f32; 16]>(data_bytes, offset)[0]; - gl.uniform_matrix_4_f32_slice(location, false, &data); + unsafe { gl.uniform_matrix_4_f32_slice(location, false, &data) }; } _ => panic!("Unsupported uniform datatype!"), } @@ -1218,27 +1345,26 @@ impl crate::Queue for super::Queue { ) -> Result<(), crate::DeviceError> { let shared = Arc::clone(&self.shared); let gl = &shared.context.lock(); - self.reset_state(gl); + unsafe { self.reset_state(gl) }; for cmd_buf in command_buffers.iter() { #[cfg(not(target_arch = "wasm32"))] if let Some(ref label) = cmd_buf.label { - gl.push_debug_group(glow::DEBUG_SOURCE_APPLICATION, DEBUG_ID, label); + unsafe { 
gl.push_debug_group(glow::DEBUG_SOURCE_APPLICATION, DEBUG_ID, label) }; } for command in cmd_buf.commands.iter() { - self.process(gl, command, &cmd_buf.data_bytes, &cmd_buf.queries); + unsafe { self.process(gl, command, &cmd_buf.data_bytes, &cmd_buf.queries) }; } #[cfg(not(target_arch = "wasm32"))] if cmd_buf.label.is_some() { - gl.pop_debug_group(); + unsafe { gl.pop_debug_group() }; } } if let Some((fence, value)) = signal_fence { fence.maintain(gl); - let sync = gl - .fence_sync(glow::SYNC_GPU_COMMANDS_COMPLETE, 0) + let sync = unsafe { gl.fence_sync(glow::SYNC_GPU_COMMANDS_COMPLETE, 0) } .map_err(|_| crate::DeviceError::OutOfMemory)?; fence.pending.push((value, sync)); } @@ -1252,12 +1378,12 @@ impl crate::Queue for super::Queue { texture: super::Texture, ) -> Result<(), crate::SurfaceError> { #[cfg(any(not(target_arch = "wasm32"), feature = "emscripten"))] - let gl = &self.shared.context.get_without_egl_lock(); + let gl = unsafe { &self.shared.context.get_without_egl_lock() }; #[cfg(all(target_arch = "wasm32", not(feature = "emscripten")))] let gl = &self.shared.context.glow_context; - surface.present(texture, gl) + unsafe { surface.present(texture, gl) } } unsafe fn get_timestamp_period(&self) -> f32 { diff --git a/wgpu-hal/src/gles/web.rs b/wgpu-hal/src/gles/web.rs index e2f9df0e26..b9e7302182 100644 --- a/wgpu-hal/src/gles/web.rs +++ b/wgpu-hal/src/gles/web.rs @@ -101,7 +101,7 @@ impl crate::Instance for Instance { None => return Vec::new(), }; - super::Adapter::expose(AdapterContext { glow_context: gl }) + unsafe { super::Adapter::expose(AdapterContext { glow_context: gl }) } .into_iter() .collect() } @@ -172,67 +172,67 @@ impl Surface { if swapchain.format.describe().srgb { // Important to set the viewport since we don't know in what state the user left it. 
- gl.viewport( - 0, - 0, - swapchain.extent.width as _, - swapchain.extent.height as _, - ); - gl.bind_framebuffer(glow::DRAW_FRAMEBUFFER, None); - gl.bind_sampler(0, None); - gl.active_texture(glow::TEXTURE0); - gl.bind_texture(glow::TEXTURE_2D, self.texture); - gl.use_program(self.srgb_present_program); - gl.disable(glow::DEPTH_TEST); - gl.disable(glow::STENCIL_TEST); - gl.disable(glow::SCISSOR_TEST); - gl.disable(glow::BLEND); - gl.disable(glow::CULL_FACE); - gl.draw_buffers(&[glow::BACK]); - gl.draw_arrays(glow::TRIANGLES, 0, 3); + unsafe { + gl.viewport( + 0, + 0, + swapchain.extent.width as _, + swapchain.extent.height as _, + ) + }; + unsafe { gl.bind_framebuffer(glow::DRAW_FRAMEBUFFER, None) }; + unsafe { gl.bind_sampler(0, None) }; + unsafe { gl.active_texture(glow::TEXTURE0) }; + unsafe { gl.bind_texture(glow::TEXTURE_2D, self.texture) }; + unsafe { gl.use_program(self.srgb_present_program) }; + unsafe { gl.disable(glow::DEPTH_TEST) }; + unsafe { gl.disable(glow::STENCIL_TEST) }; + unsafe { gl.disable(glow::SCISSOR_TEST) }; + unsafe { gl.disable(glow::BLEND) }; + unsafe { gl.disable(glow::CULL_FACE) }; + unsafe { gl.draw_buffers(&[glow::BACK]) }; + unsafe { gl.draw_arrays(glow::TRIANGLES, 0, 3) }; } else { - gl.bind_framebuffer(glow::READ_FRAMEBUFFER, Some(swapchain.framebuffer)); - gl.bind_framebuffer(glow::DRAW_FRAMEBUFFER, None); + unsafe { gl.bind_framebuffer(glow::READ_FRAMEBUFFER, Some(swapchain.framebuffer)) }; + unsafe { gl.bind_framebuffer(glow::DRAW_FRAMEBUFFER, None) }; // Note the Y-flipping here. GL's presentation is not flipped, // but main rendering is. Therefore, we Y-flip the output positions // in the shader, and also this blit. 
- gl.blit_framebuffer( - 0, - swapchain.extent.height as i32, - swapchain.extent.width as i32, - 0, - 0, - 0, - swapchain.extent.width as i32, - swapchain.extent.height as i32, - glow::COLOR_BUFFER_BIT, - glow::NEAREST, - ); + unsafe { + gl.blit_framebuffer( + 0, + swapchain.extent.height as i32, + swapchain.extent.width as i32, + 0, + 0, + 0, + swapchain.extent.width as i32, + swapchain.extent.height as i32, + glow::COLOR_BUFFER_BIT, + glow::NEAREST, + ) + }; } Ok(()) } unsafe fn create_srgb_present_program(gl: &glow::Context) -> glow::Program { - let program = gl - .create_program() - .expect("Could not create shader program"); - let vertex = gl - .create_shader(glow::VERTEX_SHADER) - .expect("Could not create shader"); - gl.shader_source(vertex, include_str!("./shaders/srgb_present.vert")); - gl.compile_shader(vertex); - let fragment = gl - .create_shader(glow::FRAGMENT_SHADER) - .expect("Could not create shader"); - gl.shader_source(fragment, include_str!("./shaders/srgb_present.frag")); - gl.compile_shader(fragment); - gl.attach_shader(program, vertex); - gl.attach_shader(program, fragment); - gl.link_program(program); - gl.delete_shader(vertex); - gl.delete_shader(fragment); - gl.bind_texture(glow::TEXTURE_2D, None); + let program = unsafe { gl.create_program() }.expect("Could not create shader program"); + let vertex = + unsafe { gl.create_shader(glow::VERTEX_SHADER) }.expect("Could not create shader"); + unsafe { gl.shader_source(vertex, include_str!("./shaders/srgb_present.vert")) }; + unsafe { gl.compile_shader(vertex) }; + let fragment = + unsafe { gl.create_shader(glow::FRAGMENT_SHADER) }.expect("Could not create shader"); + unsafe { gl.shader_source(fragment, include_str!("./shaders/srgb_present.frag")) }; + unsafe { gl.compile_shader(fragment) }; + unsafe { gl.attach_shader(program, vertex) }; + unsafe { gl.attach_shader(program, fragment) }; + unsafe { gl.link_program(program) }; + unsafe { gl.delete_shader(vertex) }; + unsafe { 
gl.delete_shader(fragment) }; + unsafe { gl.bind_texture(glow::TEXTURE_2D, None) }; program } @@ -253,49 +253,57 @@ impl crate::Surface for Surface { if let Some(swapchain) = self.swapchain.take() { // delete all frame buffers already allocated - gl.delete_framebuffer(swapchain.framebuffer); + unsafe { gl.delete_framebuffer(swapchain.framebuffer) }; } if self.srgb_present_program.is_none() && config.format.describe().srgb { - self.srgb_present_program = Some(Self::create_srgb_present_program(gl)); + self.srgb_present_program = Some(unsafe { Self::create_srgb_present_program(gl) }); } if let Some(texture) = self.texture.take() { - gl.delete_texture(texture); + unsafe { gl.delete_texture(texture) }; } - self.texture = Some(gl.create_texture().unwrap()); + self.texture = Some(unsafe { gl.create_texture() }.unwrap()); let desc = device.shared.describe_texture_format(config.format); - gl.bind_texture(glow::TEXTURE_2D, self.texture); - gl.tex_parameter_i32( - glow::TEXTURE_2D, - glow::TEXTURE_MIN_FILTER, - glow::NEAREST as _, - ); - gl.tex_parameter_i32( - glow::TEXTURE_2D, - glow::TEXTURE_MAG_FILTER, - glow::NEAREST as _, - ); - gl.tex_storage_2d( - glow::TEXTURE_2D, - 1, - desc.internal, - config.extent.width as i32, - config.extent.height as i32, - ); - - let framebuffer = gl.create_framebuffer().unwrap(); - gl.bind_framebuffer(glow::READ_FRAMEBUFFER, Some(framebuffer)); - gl.framebuffer_texture_2d( - glow::READ_FRAMEBUFFER, - glow::COLOR_ATTACHMENT0, - glow::TEXTURE_2D, - self.texture, - 0, - ); - gl.bind_texture(glow::TEXTURE_2D, None); + unsafe { gl.bind_texture(glow::TEXTURE_2D, self.texture) }; + unsafe { + gl.tex_parameter_i32( + glow::TEXTURE_2D, + glow::TEXTURE_MIN_FILTER, + glow::NEAREST as _, + ) + }; + unsafe { + gl.tex_parameter_i32( + glow::TEXTURE_2D, + glow::TEXTURE_MAG_FILTER, + glow::NEAREST as _, + ) + }; + unsafe { + gl.tex_storage_2d( + glow::TEXTURE_2D, + 1, + desc.internal, + config.extent.width as i32, + config.extent.height as i32, + ) + }; + + 
let framebuffer = unsafe { gl.create_framebuffer() }.unwrap(); + unsafe { gl.bind_framebuffer(glow::READ_FRAMEBUFFER, Some(framebuffer)) }; + unsafe { + gl.framebuffer_texture_2d( + glow::READ_FRAMEBUFFER, + glow::COLOR_ATTACHMENT0, + glow::TEXTURE_2D, + self.texture, + 0, + ) + }; + unsafe { gl.bind_texture(glow::TEXTURE_2D, None) }; self.swapchain = Some(Swapchain { extent: config.extent, @@ -310,10 +318,10 @@ impl crate::Surface for Surface { unsafe fn unconfigure(&mut self, device: &super::Device) { let gl = device.shared.context.lock(); if let Some(swapchain) = self.swapchain.take() { - gl.delete_framebuffer(swapchain.framebuffer); + unsafe { gl.delete_framebuffer(swapchain.framebuffer) }; } if let Some(renderbuffer) = self.texture.take() { - gl.delete_texture(renderbuffer); + unsafe { gl.delete_texture(renderbuffer) }; } } diff --git a/wgpu-hal/src/lib.rs b/wgpu-hal/src/lib.rs index 9dc59dc9f8..b688e326d6 100644 --- a/wgpu-hal/src/lib.rs +++ b/wgpu-hal/src/lib.rs @@ -42,6 +42,7 @@ #![warn( trivial_casts, trivial_numeric_casts, + unsafe_op_in_unsafe_fn, unused_extern_crates, unused_qualifications, // We don't match on a reference, unless required. 
diff --git a/wgpu-hal/src/metal/device.rs b/wgpu-hal/src/metal/device.rs index d7c464b730..a2d151c02c 100644 --- a/wgpu-hal/src/metal/device.rs +++ b/wgpu-hal/src/metal/device.rs @@ -266,7 +266,7 @@ impl crate::Device for super::Device { let ptr = buffer.raw.contents() as *mut u8; assert!(!ptr.is_null()); Ok(crate::BufferMapping { - ptr: ptr::NonNull::new(ptr.offset(range.start as isize)).unwrap(), + ptr: ptr::NonNull::new(unsafe { ptr.offset(range.start as isize) }).unwrap(), is_coherent: true, }) } diff --git a/wgpu-hal/src/metal/mod.rs b/wgpu-hal/src/metal/mod.rs index 886d145369..b36dbe473a 100644 --- a/wgpu-hal/src/metal/mod.rs +++ b/wgpu-hal/src/metal/mod.rs @@ -88,19 +88,18 @@ impl crate::Instance for Instance { #[cfg(target_os = "ios")] raw_window_handle::RawWindowHandle::UiKit(handle) => { let _ = &self.managed_metal_layer_delegate; - Ok(Surface::from_view(handle.ui_view, None)) + Ok(unsafe { Surface::from_view(handle.ui_view, None) }) } #[cfg(target_os = "macos")] - raw_window_handle::RawWindowHandle::AppKit(handle) => Ok(Surface::from_view( - handle.ns_view, - Some(&self.managed_metal_layer_delegate), - )), + raw_window_handle::RawWindowHandle::AppKit(handle) => Ok(unsafe { + Surface::from_view(handle.ns_view, Some(&self.managed_metal_layer_delegate)) + }), _ => Err(crate::InstanceError), } } unsafe fn destroy_surface(&self, surface: Surface) { - surface.dispose(); + unsafe { surface.dispose() }; } unsafe fn enumerate_adapters(&self) -> Vec> { diff --git a/wgpu-hal/src/metal/surface.rs b/wgpu-hal/src/metal/surface.rs index 8131e21018..8d86415ef3 100644 --- a/wgpu-hal/src/metal/surface.rs +++ b/wgpu-hal/src/metal/surface.rs @@ -83,9 +83,10 @@ impl super::Surface { delegate: Option<&HalManagedMetalLayerDelegate>, ) -> Self { let view = view as *mut Object; - let render_layer = + let render_layer = unsafe { mem::transmute::<_, &mtl::MetalLayerRef>(Self::get_metal_layer(view, delegate)) - .to_owned(); + } + .to_owned(); let _: *mut c_void = msg_send![view, 
retain]; Self::new(NonNull::new(view), render_layer) } @@ -136,7 +137,7 @@ impl super::Surface { { let () = msg_send![view, setLayer: new_layer]; let () = msg_send![view, setWantsLayer: YES]; - let () = msg_send![new_layer, setContentsGravity: kCAGravityTopLeft]; + let () = msg_send![new_layer, setContentsGravity: unsafe { kCAGravityTopLeft }]; let window: *mut Object = msg_send![view, window]; if !window.is_null() { let scale_factor: CGFloat = msg_send![window, backingScaleFactor]; diff --git a/wgpu-hal/src/vulkan/adapter.rs b/wgpu-hal/src/vulkan/adapter.rs index 940b6738aa..72ae9ce90e 100644 --- a/wgpu-hal/src/vulkan/adapter.rs +++ b/wgpu-hal/src/vulkan/adapter.rs @@ -1110,9 +1110,11 @@ impl super::Adapter { ) -> Result, crate::DeviceError> { let mem_properties = { profiling::scope!("vkGetPhysicalDeviceMemoryProperties"); - self.instance - .raw - .get_physical_device_memory_properties(self.raw) + unsafe { + self.instance + .raw + .get_physical_device_memory_properties(self.raw) + } }; let memory_types = &mem_properties.memory_types[..mem_properties.memory_type_count as usize]; @@ -1219,7 +1221,7 @@ impl super::Adapter { let raw_queue = { profiling::scope!("vkGetDeviceQueue"); - raw_device.get_device_queue(family_index, queue_index) + unsafe { raw_device.get_device_queue(family_index, queue_index) } }; let shared = Arc::new(super::DeviceShared { @@ -1246,9 +1248,11 @@ impl super::Adapter { }); let mut relay_semaphores = [vk::Semaphore::null(); 2]; for sem in relay_semaphores.iter_mut() { - *sem = shared - .raw - .create_semaphore(&vk::SemaphoreCreateInfo::builder(), None)?; + unsafe { + *sem = shared + .raw + .create_semaphore(&vk::SemaphoreCreateInfo::builder(), None)? + }; } let queue = super::Queue { raw: raw_queue, @@ -1344,18 +1348,20 @@ impl crate::Adapter for super::Adapter { .build(); let raw_device = { profiling::scope!("vkCreateDevice"); - self.instance.raw.create_device(self.raw, &info, None)? 
+ unsafe { self.instance.raw.create_device(self.raw, &info, None)? } }; - self.device_from_raw( - raw_device, - true, - &enabled_extensions, - features, - uab_types, - family_info.queue_family_index, - 0, - ) + unsafe { + self.device_from_raw( + raw_device, + true, + &enabled_extensions, + features, + uab_types, + family_info.queue_family_index, + 0, + ) + } } unsafe fn texture_format_capabilities( @@ -1365,10 +1371,11 @@ impl crate::Adapter for super::Adapter { use crate::TextureFormatCapabilities as Tfc; let vk_format = self.private_caps.map_texture_format(format); - let properties = self - .instance - .raw - .get_physical_device_format_properties(self.raw, vk_format); + let properties = unsafe { + self.instance + .raw + .get_physical_device_format_properties(self.raw, vk_format) + }; let features = properties.optimal_tiling_features; let mut flags = Tfc::empty(); @@ -1466,11 +1473,13 @@ impl crate::Adapter for super::Adapter { let queue_family_index = 0; //TODO { profiling::scope!("vkGetPhysicalDeviceSurfaceSupportKHR"); - match surface.functor.get_physical_device_surface_support( - self.raw, - queue_family_index, - surface.raw, - ) { + match unsafe { + surface.functor.get_physical_device_surface_support( + self.raw, + queue_family_index, + surface.raw, + ) + } { Ok(true) => (), Ok(false) => return None, Err(e) => { @@ -1482,10 +1491,11 @@ impl crate::Adapter for super::Adapter { let caps = { profiling::scope!("vkGetPhysicalDeviceSurfaceCapabilitiesKHR"); - match surface - .functor - .get_physical_device_surface_capabilities(self.raw, surface.raw) - { + match unsafe { + surface + .functor + .get_physical_device_surface_capabilities(self.raw, surface.raw) + } { Ok(caps) => caps, Err(e) => { log::error!("get_physical_device_surface_capabilities: {}", e); @@ -1527,10 +1537,11 @@ impl crate::Adapter for super::Adapter { let raw_present_modes = { profiling::scope!("vkGetPhysicalDeviceSurfacePresentModesKHR"); - match surface - .functor - 
.get_physical_device_surface_present_modes(self.raw, surface.raw) - { + match unsafe { + surface + .functor + .get_physical_device_surface_present_modes(self.raw, surface.raw) + } { Ok(present_modes) => present_modes, Err(e) => { log::error!("get_physical_device_surface_present_modes: {}", e); @@ -1541,10 +1552,11 @@ impl crate::Adapter for super::Adapter { let raw_surface_formats = { profiling::scope!("vkGetPhysicalDeviceSurfaceFormatsKHR"); - match surface - .functor - .get_physical_device_surface_formats(self.raw, surface.raw) - { + match unsafe { + surface + .functor + .get_physical_device_surface_formats(self.raw, surface.raw) + } { Ok(formats) => formats, Err(e) => { log::error!("get_physical_device_surface_formats: {}", e); diff --git a/wgpu-hal/src/vulkan/command.rs b/wgpu-hal/src/vulkan/command.rs index 96a6384836..d266cd1f47 100644 --- a/wgpu-hal/src/vulkan/command.rs +++ b/wgpu-hal/src/vulkan/command.rs @@ -50,18 +50,20 @@ impl crate::CommandEncoder for super::CommandEncoder { .command_pool(self.raw) .command_buffer_count(ALLOCATION_GRANULARITY) .build(); - let cmd_buf_vec = self.device.raw.allocate_command_buffers(&vk_info)?; + let cmd_buf_vec = unsafe { self.device.raw.allocate_command_buffers(&vk_info)? }; self.free.extend(cmd_buf_vec); } let raw = self.free.pop().unwrap(); // Set the name unconditionally, since there might be a // previous name assigned to this. - self.device.set_object_name( - vk::ObjectType::COMMAND_BUFFER, - raw, - label.unwrap_or_default(), - ); + unsafe { + self.device.set_object_name( + vk::ObjectType::COMMAND_BUFFER, + raw, + label.unwrap_or_default(), + ) + }; // Reset this in case the last renderpass was never ended. 
self.rpass_debug_marker_active = false; @@ -69,7 +71,7 @@ impl crate::CommandEncoder for super::CommandEncoder { let vk_info = vk::CommandBufferBeginInfo::builder() .flags(vk::CommandBufferUsageFlags::ONE_TIME_SUBMIT) .build(); - self.device.raw.begin_command_buffer(raw, &vk_info)?; + unsafe { self.device.raw.begin_command_buffer(raw, &vk_info) }?; self.active = raw; Ok(()) @@ -78,7 +80,7 @@ impl crate::CommandEncoder for super::CommandEncoder { unsafe fn end_encoding(&mut self) -> Result { let raw = self.active; self.active = vk::CommandBuffer::null(); - self.device.raw.end_command_buffer(raw)?; + unsafe { self.device.raw.end_command_buffer(raw) }?; Ok(super::CommandBuffer { raw }) } @@ -95,10 +97,11 @@ impl crate::CommandEncoder for super::CommandEncoder { self.free .extend(cmd_bufs.into_iter().map(|cmd_buf| cmd_buf.raw)); self.free.append(&mut self.discarded); - let _ = self - .device - .raw - .reset_command_pool(self.raw, vk::CommandPoolResetFlags::default()); + let _ = unsafe { + self.device + .raw + .reset_command_pool(self.raw, vk::CommandPoolResetFlags::default()) + }; } unsafe fn transition_buffers<'a, T>(&mut self, barriers: T) @@ -128,15 +131,17 @@ impl crate::CommandEncoder for super::CommandEncoder { } if !vk_barriers.is_empty() { - self.device.raw.cmd_pipeline_barrier( - self.active, - src_stages, - dst_stages, - vk::DependencyFlags::empty(), - &[], - vk_barriers, - &[], - ); + unsafe { + self.device.raw.cmd_pipeline_barrier( + self.active, + src_stages, + dst_stages, + vk::DependencyFlags::empty(), + &[], + vk_barriers, + &[], + ) + }; } } @@ -171,26 +176,30 @@ impl crate::CommandEncoder for super::CommandEncoder { } if !vk_barriers.is_empty() { - self.device.raw.cmd_pipeline_barrier( - self.active, - src_stages, - dst_stages, - vk::DependencyFlags::empty(), - &[], - &[], - vk_barriers, - ); + unsafe { + self.device.raw.cmd_pipeline_barrier( + self.active, + src_stages, + dst_stages, + vk::DependencyFlags::empty(), + &[], + &[], + vk_barriers, + ) + 
}; } } unsafe fn clear_buffer(&mut self, buffer: &super::Buffer, range: crate::MemoryRange) { - self.device.raw.cmd_fill_buffer( - self.active, - buffer.raw, - range.start, - range.end - range.start, - 0, - ); + unsafe { + self.device.raw.cmd_fill_buffer( + self.active, + buffer.raw, + range.start, + range.end - range.start, + 0, + ) + }; } unsafe fn copy_buffer_to_buffer( @@ -207,12 +216,14 @@ impl crate::CommandEncoder for super::CommandEncoder { size: r.size.get(), }); - self.device.raw.cmd_copy_buffer( - self.active, - src.raw, - dst.raw, - &smallvec::SmallVec::<[vk::BufferCopy; 32]>::from_iter(vk_regions_iter), - ); + unsafe { + self.device.raw.cmd_copy_buffer( + self.active, + src.raw, + dst.raw, + &smallvec::SmallVec::<[vk::BufferCopy; 32]>::from_iter(vk_regions_iter), + ) + }; } unsafe fn copy_texture_to_texture( @@ -244,14 +255,16 @@ impl crate::CommandEncoder for super::CommandEncoder { } }); - self.device.raw.cmd_copy_image( - self.active, - src.raw, - src_layout, - dst.raw, - DST_IMAGE_LAYOUT, - &smallvec::SmallVec::<[vk::ImageCopy; 32]>::from_iter(vk_regions_iter), - ); + unsafe { + self.device.raw.cmd_copy_image( + self.active, + src.raw, + src_layout, + dst.raw, + DST_IMAGE_LAYOUT, + &smallvec::SmallVec::<[vk::ImageCopy; 32]>::from_iter(vk_regions_iter), + ) + }; } unsafe fn copy_buffer_to_texture( @@ -264,13 +277,15 @@ impl crate::CommandEncoder for super::CommandEncoder { { let vk_regions_iter = dst.map_buffer_copies(regions); - self.device.raw.cmd_copy_buffer_to_image( - self.active, - src.raw, - dst.raw, - DST_IMAGE_LAYOUT, - &smallvec::SmallVec::<[vk::BufferImageCopy; 32]>::from_iter(vk_regions_iter), - ); + unsafe { + self.device.raw.cmd_copy_buffer_to_image( + self.active, + src.raw, + dst.raw, + DST_IMAGE_LAYOUT, + &smallvec::SmallVec::<[vk::BufferImageCopy; 32]>::from_iter(vk_regions_iter), + ) + }; } unsafe fn copy_texture_to_buffer( @@ -285,41 +300,49 @@ impl crate::CommandEncoder for super::CommandEncoder { let src_layout = 
conv::derive_image_layout(src_usage, src.aspects); let vk_regions_iter = src.map_buffer_copies(regions); - self.device.raw.cmd_copy_image_to_buffer( - self.active, - src.raw, - src_layout, - dst.raw, - &smallvec::SmallVec::<[vk::BufferImageCopy; 32]>::from_iter(vk_regions_iter), - ); + unsafe { + self.device.raw.cmd_copy_image_to_buffer( + self.active, + src.raw, + src_layout, + dst.raw, + &smallvec::SmallVec::<[vk::BufferImageCopy; 32]>::from_iter(vk_regions_iter), + ) + }; } unsafe fn begin_query(&mut self, set: &super::QuerySet, index: u32) { - self.device.raw.cmd_begin_query( - self.active, - set.raw, - index, - vk::QueryControlFlags::empty(), - ); + unsafe { + self.device.raw.cmd_begin_query( + self.active, + set.raw, + index, + vk::QueryControlFlags::empty(), + ) + }; } unsafe fn end_query(&mut self, set: &super::QuerySet, index: u32) { - self.device.raw.cmd_end_query(self.active, set.raw, index); + unsafe { self.device.raw.cmd_end_query(self.active, set.raw, index) }; } unsafe fn write_timestamp(&mut self, set: &super::QuerySet, index: u32) { - self.device.raw.cmd_write_timestamp( - self.active, - vk::PipelineStageFlags::BOTTOM_OF_PIPE, - set.raw, - index, - ); + unsafe { + self.device.raw.cmd_write_timestamp( + self.active, + vk::PipelineStageFlags::BOTTOM_OF_PIPE, + set.raw, + index, + ) + }; } unsafe fn reset_queries(&mut self, set: &super::QuerySet, range: Range) { - self.device.raw.cmd_reset_query_pool( - self.active, - set.raw, - range.start, - range.end - range.start, - ); + unsafe { + self.device.raw.cmd_reset_query_pool( + self.active, + set.raw, + range.start, + range.end - range.start, + ) + }; } unsafe fn copy_query_results( &mut self, @@ -329,16 +352,18 @@ impl crate::CommandEncoder for super::CommandEncoder { offset: wgt::BufferAddress, stride: wgt::BufferSize, ) { - self.device.raw.cmd_copy_query_pool_results( - self.active, - set.raw, - range.start, - range.end - range.start, - buffer.raw, - offset, - stride.get(), - 
vk::QueryResultFlags::TYPE_64 | vk::QueryResultFlags::WAIT, - ); + unsafe { + self.device.raw.cmd_copy_query_pool_results( + self.active, + set.raw, + range.start, + range.end - range.start, + buffer.raw, + offset, + stride.get(), + vk::QueryResultFlags::TYPE_64 | vk::QueryResultFlags::WAIT, + ) + }; } // render @@ -358,7 +383,7 @@ impl crate::CommandEncoder for super::CommandEncoder { for cat in desc.color_attachments { if let Some(cat) = cat.as_ref() { vk_clear_values.push(vk::ClearValue { - color: cat.make_vk_clear_color(), + color: unsafe { cat.make_vk_clear_color() }, }); vk_image_views.push(cat.target.view.raw); let color = super::ColorAttachmentKey { @@ -371,7 +396,7 @@ impl crate::CommandEncoder for super::CommandEncoder { rp_key.colors.push(Some(color)); fb_key.attachments.push(cat.target.view.attachment.clone()); if let Some(ref at) = cat.resolve_target { - vk_clear_values.push(mem::zeroed()); + vk_clear_values.push(unsafe { mem::zeroed() }); vk_image_views.push(at.view.raw); fb_key.attachments.push(at.view.attachment.clone()); } @@ -456,27 +481,33 @@ impl crate::CommandEncoder for super::CommandEncoder { } if let Some(label) = desc.label { - self.begin_debug_marker(label); + unsafe { self.begin_debug_marker(label) }; self.rpass_debug_marker_active = true; } - self.device - .raw - .cmd_set_viewport(self.active, 0, &vk_viewports); - self.device - .raw - .cmd_set_scissor(self.active, 0, &[render_area]); - self.device - .raw - .cmd_begin_render_pass(self.active, &vk_info, vk::SubpassContents::INLINE); + unsafe { + self.device + .raw + .cmd_set_viewport(self.active, 0, &vk_viewports); + self.device + .raw + .cmd_set_scissor(self.active, 0, &[render_area]); + self.device.raw.cmd_begin_render_pass( + self.active, + &vk_info, + vk::SubpassContents::INLINE, + ); + }; self.bind_point = vk::PipelineBindPoint::GRAPHICS; } unsafe fn end_render_pass(&mut self) { - self.device.raw.cmd_end_render_pass(self.active); - if self.rpass_debug_marker_active { - 
self.end_debug_marker(); - self.rpass_debug_marker_active = false; + unsafe { + self.device.raw.cmd_end_render_pass(self.active); + if self.rpass_debug_marker_active { + self.end_debug_marker(); + self.rpass_debug_marker_active = false; + } } } @@ -488,14 +519,16 @@ impl crate::CommandEncoder for super::CommandEncoder { dynamic_offsets: &[wgt::DynamicOffset], ) { let sets = [*group.set.raw()]; - self.device.raw.cmd_bind_descriptor_sets( - self.active, - self.bind_point, - layout.raw, - index, - &sets, - dynamic_offsets, - ); + unsafe { + self.device.raw.cmd_bind_descriptor_sets( + self.active, + self.bind_point, + layout.raw, + index, + &sets, + dynamic_offsets, + ) + }; } unsafe fn set_push_constants( &mut self, @@ -504,41 +537,45 @@ impl crate::CommandEncoder for super::CommandEncoder { offset: u32, data: &[u32], ) { - self.device.raw.cmd_push_constants( - self.active, - layout.raw, - conv::map_shader_stage(stages), - offset, - slice::from_raw_parts(data.as_ptr() as _, data.len() * 4), - ); + unsafe { + self.device.raw.cmd_push_constants( + self.active, + layout.raw, + conv::map_shader_stage(stages), + offset, + slice::from_raw_parts(data.as_ptr() as _, data.len() * 4), + ) + }; } unsafe fn insert_debug_marker(&mut self, label: &str) { if let Some(ext) = self.device.debug_messenger() { let cstr = self.temp.make_c_str(label); let vk_label = vk::DebugUtilsLabelEXT::builder().label_name(cstr).build(); - ext.cmd_insert_debug_utils_label(self.active, &vk_label); + unsafe { ext.cmd_insert_debug_utils_label(self.active, &vk_label) }; } } unsafe fn begin_debug_marker(&mut self, group_label: &str) { if let Some(ext) = self.device.debug_messenger() { let cstr = self.temp.make_c_str(group_label); let vk_label = vk::DebugUtilsLabelEXT::builder().label_name(cstr).build(); - ext.cmd_begin_debug_utils_label(self.active, &vk_label); + unsafe { ext.cmd_begin_debug_utils_label(self.active, &vk_label) }; } } unsafe fn end_debug_marker(&mut self) { if let Some(ext) = 
self.device.debug_messenger() { - ext.cmd_end_debug_utils_label(self.active); + unsafe { ext.cmd_end_debug_utils_label(self.active) }; } } unsafe fn set_render_pipeline(&mut self, pipeline: &super::RenderPipeline) { - self.device.raw.cmd_bind_pipeline( - self.active, - vk::PipelineBindPoint::GRAPHICS, - pipeline.raw, - ); + unsafe { + self.device.raw.cmd_bind_pipeline( + self.active, + vk::PipelineBindPoint::GRAPHICS, + pipeline.raw, + ) + }; } unsafe fn set_index_buffer<'a>( @@ -546,12 +583,14 @@ impl crate::CommandEncoder for super::CommandEncoder { binding: crate::BufferBinding<'a, super::Api>, format: wgt::IndexFormat, ) { - self.device.raw.cmd_bind_index_buffer( - self.active, - binding.buffer.raw, - binding.offset, - conv::map_index_format(format), - ); + unsafe { + self.device.raw.cmd_bind_index_buffer( + self.active, + binding.buffer.raw, + binding.offset, + conv::map_index_format(format), + ) + }; } unsafe fn set_vertex_buffer<'a>( &mut self, @@ -560,9 +599,11 @@ impl crate::CommandEncoder for super::CommandEncoder { ) { let vk_buffers = [binding.buffer.raw]; let vk_offsets = [binding.offset]; - self.device - .raw - .cmd_bind_vertex_buffers(self.active, index, &vk_buffers, &vk_offsets); + unsafe { + self.device + .raw + .cmd_bind_vertex_buffers(self.active, index, &vk_buffers, &vk_offsets) + }; } unsafe fn set_viewport(&mut self, rect: &crate::Rect, depth_range: Range) { let vk_viewports = [vk::Viewport { @@ -577,9 +618,11 @@ impl crate::CommandEncoder for super::CommandEncoder { min_depth: depth_range.start, max_depth: depth_range.end, }]; - self.device - .raw - .cmd_set_viewport(self.active, 0, &vk_viewports); + unsafe { + self.device + .raw + .cmd_set_viewport(self.active, 0, &vk_viewports) + }; } unsafe fn set_scissor_rect(&mut self, rect: &crate::Rect) { let vk_scissors = [vk::Rect2D { @@ -592,19 +635,23 @@ impl crate::CommandEncoder for super::CommandEncoder { height: rect.h, }, }]; - self.device - .raw - .cmd_set_scissor(self.active, 0, 
&vk_scissors); + unsafe { + self.device + .raw + .cmd_set_scissor(self.active, 0, &vk_scissors) + }; } unsafe fn set_stencil_reference(&mut self, value: u32) { - self.device.raw.cmd_set_stencil_reference( - self.active, - vk::StencilFaceFlags::FRONT_AND_BACK, - value, - ); + unsafe { + self.device.raw.cmd_set_stencil_reference( + self.active, + vk::StencilFaceFlags::FRONT_AND_BACK, + value, + ) + }; } unsafe fn set_blend_constants(&mut self, color: &[f32; 4]) { - self.device.raw.cmd_set_blend_constants(self.active, color); + unsafe { self.device.raw.cmd_set_blend_constants(self.active, color) }; } unsafe fn draw( @@ -614,13 +661,15 @@ impl crate::CommandEncoder for super::CommandEncoder { start_instance: u32, instance_count: u32, ) { - self.device.raw.cmd_draw( - self.active, - vertex_count, - instance_count, - start_vertex, - start_instance, - ); + unsafe { + self.device.raw.cmd_draw( + self.active, + vertex_count, + instance_count, + start_vertex, + start_instance, + ) + }; } unsafe fn draw_indexed( &mut self, @@ -630,14 +679,16 @@ impl crate::CommandEncoder for super::CommandEncoder { start_instance: u32, instance_count: u32, ) { - self.device.raw.cmd_draw_indexed( - self.active, - index_count, - instance_count, - start_index, - base_vertex, - start_instance, - ); + unsafe { + self.device.raw.cmd_draw_indexed( + self.active, + index_count, + instance_count, + start_index, + base_vertex, + start_instance, + ) + }; } unsafe fn draw_indirect( &mut self, @@ -645,13 +696,15 @@ impl crate::CommandEncoder for super::CommandEncoder { offset: wgt::BufferAddress, draw_count: u32, ) { - self.device.raw.cmd_draw_indirect( - self.active, - buffer.raw, - offset, - draw_count, - mem::size_of::() as u32, - ); + unsafe { + self.device.raw.cmd_draw_indirect( + self.active, + buffer.raw, + offset, + draw_count, + mem::size_of::() as u32, + ) + }; } unsafe fn draw_indexed_indirect( &mut self, @@ -659,13 +712,15 @@ impl crate::CommandEncoder for super::CommandEncoder { offset: 
wgt::BufferAddress, draw_count: u32, ) { - self.device.raw.cmd_draw_indexed_indirect( - self.active, - buffer.raw, - offset, - draw_count, - mem::size_of::() as u32, - ); + unsafe { + self.device.raw.cmd_draw_indexed_indirect( + self.active, + buffer.raw, + offset, + draw_count, + mem::size_of::() as u32, + ) + }; } unsafe fn draw_indirect_count( &mut self, @@ -678,15 +733,17 @@ impl crate::CommandEncoder for super::CommandEncoder { let stride = mem::size_of::() as u32; match self.device.extension_fns.draw_indirect_count { Some(ref t) => { - t.cmd_draw_indirect_count( - self.active, - buffer.raw, - offset, - count_buffer.raw, - count_offset, - max_count, - stride, - ); + unsafe { + t.cmd_draw_indirect_count( + self.active, + buffer.raw, + offset, + count_buffer.raw, + count_offset, + max_count, + stride, + ) + }; } None => panic!("Feature `DRAW_INDIRECT_COUNT` not enabled"), } @@ -702,15 +759,17 @@ impl crate::CommandEncoder for super::CommandEncoder { let stride = mem::size_of::() as u32; match self.device.extension_fns.draw_indirect_count { Some(ref t) => { - t.cmd_draw_indexed_indirect_count( - self.active, - buffer.raw, - offset, - count_buffer.raw, - count_offset, - max_count, - stride, - ); + unsafe { + t.cmd_draw_indexed_indirect_count( + self.active, + buffer.raw, + offset, + count_buffer.raw, + count_offset, + max_count, + stride, + ) + }; } None => panic!("Feature `DRAW_INDIRECT_COUNT` not enabled"), } @@ -721,34 +780,40 @@ impl crate::CommandEncoder for super::CommandEncoder { unsafe fn begin_compute_pass(&mut self, desc: &crate::ComputePassDescriptor) { self.bind_point = vk::PipelineBindPoint::COMPUTE; if let Some(label) = desc.label { - self.begin_debug_marker(label); + unsafe { self.begin_debug_marker(label) }; self.rpass_debug_marker_active = true; } } unsafe fn end_compute_pass(&mut self) { if self.rpass_debug_marker_active { - self.end_debug_marker(); + unsafe { self.end_debug_marker() }; self.rpass_debug_marker_active = false } } unsafe fn 
set_compute_pipeline(&mut self, pipeline: &super::ComputePipeline) { - self.device.raw.cmd_bind_pipeline( - self.active, - vk::PipelineBindPoint::COMPUTE, - pipeline.raw, - ); + unsafe { + self.device.raw.cmd_bind_pipeline( + self.active, + vk::PipelineBindPoint::COMPUTE, + pipeline.raw, + ) + }; } unsafe fn dispatch(&mut self, count: [u32; 3]) { - self.device - .raw - .cmd_dispatch(self.active, count[0], count[1], count[2]); + unsafe { + self.device + .raw + .cmd_dispatch(self.active, count[0], count[1], count[2]) + }; } unsafe fn dispatch_indirect(&mut self, buffer: &super::Buffer, offset: wgt::BufferAddress) { - self.device - .raw - .cmd_dispatch_indirect(self.active, buffer.raw, offset) + unsafe { + self.device + .raw + .cmd_dispatch_indirect(self.active, buffer.raw, offset) + } } } diff --git a/wgpu-hal/src/vulkan/device.rs b/wgpu-hal/src/vulkan/device.rs index f30cd57dc4..98243219d0 100644 --- a/wgpu-hal/src/vulkan/device.rs +++ b/wgpu-hal/src/vulkan/device.rs @@ -48,14 +48,15 @@ impl super::DeviceShared { .collect(); &buffer_vec }; - - let _result = extension.debug_utils_set_object_name( - self.raw.handle(), - &vk::DebugUtilsObjectNameInfoEXT::builder() - .object_type(object_type) - .object_handle(object.as_raw()) - .object_name(CStr::from_bytes_with_nul_unchecked(name_bytes)), - ); + let _result = unsafe { + extension.debug_utils_set_object_name( + self.raw.handle(), + &vk::DebugUtilsObjectNameInfoEXT::builder() + .object_type(object_type) + .object_handle(object.as_raw()) + .object_name(CStr::from_bytes_with_nul_unchecked(name_bytes)), + ) + }; } pub fn make_render_pass( @@ -278,13 +279,13 @@ impl super::DeviceShared { unsafe fn free_resources(&self) { for &raw in self.render_passes.lock().values() { - self.raw.destroy_render_pass(raw, None); + unsafe { self.raw.destroy_render_pass(raw, None) }; } for &raw in self.framebuffers.lock().values() { - self.raw.destroy_framebuffer(raw, None); + unsafe { self.raw.destroy_framebuffer(raw, None) }; } if 
self.handle_is_owned { - self.raw.destroy_device(None); + unsafe { self.raw.destroy_device(None) }; } } } @@ -308,7 +309,7 @@ impl gpu_alloc::MemoryDevice for super::DeviceShared { info = info.push_next(&mut info_flags); } - match self.raw.allocate_memory(&info, None) { + match unsafe { self.raw.allocate_memory(&info, None) } { Ok(memory) => Ok(memory), Err(vk::Result::ERROR_OUT_OF_DEVICE_MEMORY) => { Err(gpu_alloc::OutOfMemory::OutOfDeviceMemory) @@ -322,7 +323,7 @@ impl gpu_alloc::MemoryDevice for super::DeviceShared { } unsafe fn deallocate_memory(&self, memory: vk::DeviceMemory) { - self.raw.free_memory(memory, None); + unsafe { self.raw.free_memory(memory, None) }; } unsafe fn map_memory( @@ -331,10 +332,10 @@ impl gpu_alloc::MemoryDevice for super::DeviceShared { offset: u64, size: u64, ) -> Result, gpu_alloc::DeviceMapError> { - match self - .raw - .map_memory(*memory, offset, size, vk::MemoryMapFlags::empty()) - { + match unsafe { + self.raw + .map_memory(*memory, offset, size, vk::MemoryMapFlags::empty()) + } { Ok(ptr) => Ok(ptr::NonNull::new(ptr as *mut u8) .expect("Pointer to memory mapping must not be null")), Err(vk::Result::ERROR_OUT_OF_DEVICE_MEMORY) => { @@ -349,7 +350,7 @@ impl gpu_alloc::MemoryDevice for super::DeviceShared { } unsafe fn unmap_memory(&self, memory: &mut vk::DeviceMemory) { - self.raw.unmap_memory(*memory); + unsafe { self.raw.unmap_memory(*memory) }; } unsafe fn invalidate_memory_ranges( @@ -433,7 +434,7 @@ impl .pool_sizes(&filtered_counts) .build(); - match self.raw.create_descriptor_pool(&vk_info, None) { + match unsafe { self.raw.create_descriptor_pool(&vk_info, None) } { Ok(pool) => Ok(pool), Err(vk::Result::ERROR_OUT_OF_HOST_MEMORY) => { Err(gpu_descriptor::CreatePoolError::OutOfHostMemory) @@ -452,7 +453,7 @@ impl } unsafe fn destroy_descriptor_pool(&self, pool: vk::DescriptorPool) { - self.raw.destroy_descriptor_pool(pool, None) + unsafe { self.raw.destroy_descriptor_pool(pool, None) } } unsafe fn 
alloc_descriptor_sets<'a>( @@ -461,16 +462,18 @@ impl layouts: impl ExactSizeIterator, sets: &mut impl Extend, ) -> Result<(), gpu_descriptor::DeviceAllocationError> { - let result = self.raw.allocate_descriptor_sets( - &vk::DescriptorSetAllocateInfo::builder() - .descriptor_pool(*pool) - .set_layouts( - &smallvec::SmallVec::<[vk::DescriptorSetLayout; 32]>::from_iter( - layouts.cloned(), - ), - ) - .build(), - ); + let result = unsafe { + self.raw.allocate_descriptor_sets( + &vk::DescriptorSetAllocateInfo::builder() + .descriptor_pool(*pool) + .set_layouts( + &smallvec::SmallVec::<[vk::DescriptorSetLayout; 32]>::from_iter( + layouts.cloned(), + ), + ) + .build(), + ) + }; match result { Ok(vk_sets) => { @@ -499,10 +502,12 @@ impl pool: &mut vk::DescriptorPool, sets: impl Iterator, ) { - let result = self.raw.free_descriptor_sets( - *pool, - &smallvec::SmallVec::<[vk::DescriptorSet; 32]>::from_iter(sets), - ); + let result = unsafe { + self.raw.free_descriptor_sets( + *pool, + &smallvec::SmallVec::<[vk::DescriptorSet; 32]>::from_iter(sets), + ) + }; match result { Ok(()) => {} Err(err) => log::error!("free_descriptor_sets: {:?}", err), @@ -559,12 +564,12 @@ impl super::Device { let result = { profiling::scope!("vkCreateSwapchainKHR"); - functor.create_swapchain(&info, None) + unsafe { functor.create_swapchain(&info, None) } }; // doing this before bailing out with error if old_swapchain != vk::SwapchainKHR::null() { - functor.destroy_swapchain(old_swapchain, None) + unsafe { functor.destroy_swapchain(old_swapchain, None) } } let raw = match result { @@ -580,15 +585,11 @@ impl super::Device { } }; - let images = functor - .get_swapchain_images(raw) - .map_err(crate::DeviceError::from)?; + let images = + unsafe { functor.get_swapchain_images(raw) }.map_err(crate::DeviceError::from)?; let vk_info = vk::FenceCreateInfo::builder().build(); - let fence = self - .shared - .raw - .create_fence(&vk_info, None) + let fence = unsafe { self.shared.raw.create_fence(&vk_info, 
None) } .map_err(crate::DeviceError::from)?; Ok(super::Swapchain { @@ -741,12 +742,12 @@ impl super::Device { impl crate::Device for super::Device { unsafe fn exit(self, queue: super::Queue) { - self.mem_allocator.into_inner().cleanup(&*self.shared); - self.desc_allocator.into_inner().cleanup(&*self.shared); + unsafe { self.mem_allocator.into_inner().cleanup(&*self.shared) }; + unsafe { self.desc_allocator.into_inner().cleanup(&*self.shared) }; for &sem in queue.relay_semaphores.iter() { - self.shared.raw.destroy_semaphore(sem, None); + unsafe { self.shared.raw.destroy_semaphore(sem, None) }; } - self.shared.free_resources(); + unsafe { self.shared.free_resources() }; } unsafe fn create_buffer( @@ -758,8 +759,8 @@ impl crate::Device for super::Device { .usage(conv::map_buffer_usage(desc.usage)) .sharing_mode(vk::SharingMode::EXCLUSIVE); - let raw = self.shared.raw.create_buffer(&vk_info, None)?; - let req = self.shared.raw.get_buffer_memory_requirements(raw); + let raw = unsafe { self.shared.raw.create_buffer(&vk_info, None)? }; + let req = unsafe { self.shared.raw.get_buffer_memory_requirements(raw) }; let mut alloc_usage = if desc .usage @@ -784,23 +785,29 @@ impl crate::Device for super::Device { desc.memory_flags.contains(crate::MemoryFlags::TRANSIENT), ); - let block = self.mem_allocator.lock().alloc( - &*self.shared, - gpu_alloc::Request { - size: req.size, - align_mask: req.alignment - 1, - usage: alloc_usage, - memory_types: req.memory_type_bits & self.valid_ash_memory_types, - }, - )?; + let block = unsafe { + self.mem_allocator.lock().alloc( + &*self.shared, + gpu_alloc::Request { + size: req.size, + align_mask: req.alignment - 1, + usage: alloc_usage, + memory_types: req.memory_type_bits & self.valid_ash_memory_types, + }, + )? + }; - self.shared - .raw - .bind_buffer_memory(raw, *block.memory(), block.offset())?; + unsafe { + self.shared + .raw + .bind_buffer_memory(raw, *block.memory(), block.offset())? 
+ }; if let Some(label) = desc.label { - self.shared - .set_object_name(vk::ObjectType::BUFFER, raw, label); + unsafe { + self.shared + .set_object_name(vk::ObjectType::BUFFER, raw, label) + }; } Ok(super::Buffer { @@ -809,10 +816,12 @@ impl crate::Device for super::Device { }) } unsafe fn destroy_buffer(&self, buffer: super::Buffer) { - self.shared.raw.destroy_buffer(buffer.raw, None); - self.mem_allocator - .lock() - .dealloc(&*self.shared, buffer.block.into_inner()); + unsafe { self.shared.raw.destroy_buffer(buffer.raw, None) }; + unsafe { + self.mem_allocator + .lock() + .dealloc(&*self.shared, buffer.block.into_inner()) + }; } unsafe fn map_buffer( @@ -822,14 +831,14 @@ impl crate::Device for super::Device { ) -> Result { let size = range.end - range.start; let mut block = buffer.block.lock(); - let ptr = block.map(&*self.shared, range.start, size as usize)?; + let ptr = unsafe { block.map(&*self.shared, range.start, size as usize)? }; let is_coherent = block .props() .contains(gpu_alloc::MemoryPropertyFlags::HOST_COHERENT); Ok(crate::BufferMapping { ptr, is_coherent }) } unsafe fn unmap_buffer(&self, buffer: &super::Buffer) -> Result<(), crate::DeviceError> { - buffer.block.lock().unmap(&*self.shared); + unsafe { buffer.block.lock().unmap(&*self.shared) }; Ok(()) } @@ -839,12 +848,14 @@ impl crate::Device for super::Device { { let vk_ranges = self.shared.make_memory_ranges(buffer, ranges); - self.shared - .raw - .flush_mapped_memory_ranges( - &smallvec::SmallVec::<[vk::MappedMemoryRange; 32]>::from_iter(vk_ranges), - ) - .unwrap(); + unsafe { + self.shared + .raw + .flush_mapped_memory_ranges( + &smallvec::SmallVec::<[vk::MappedMemoryRange; 32]>::from_iter(vk_ranges), + ) + } + .unwrap(); } unsafe fn invalidate_mapped_ranges(&self, buffer: &super::Buffer, ranges: I) where @@ -852,12 +863,14 @@ impl crate::Device for super::Device { { let vk_ranges = self.shared.make_memory_ranges(buffer, ranges); - self.shared - .raw - .invalidate_mapped_memory_ranges( - 
&smallvec::SmallVec::<[vk::MappedMemoryRange; 32]>::from_iter(vk_ranges), - ) - .unwrap(); + unsafe { + self.shared + .raw + .invalidate_mapped_memory_ranges( + &smallvec::SmallVec::<[vk::MappedMemoryRange; 32]>::from_iter(vk_ranges), + ) + } + .unwrap(); } unsafe fn create_texture( @@ -896,26 +909,32 @@ impl crate::Device for super::Device { .sharing_mode(vk::SharingMode::EXCLUSIVE) .initial_layout(vk::ImageLayout::UNDEFINED); - let raw = self.shared.raw.create_image(&vk_info, None)?; - let req = self.shared.raw.get_image_memory_requirements(raw); - - let block = self.mem_allocator.lock().alloc( - &*self.shared, - gpu_alloc::Request { - size: req.size, - align_mask: req.alignment - 1, - usage: gpu_alloc::UsageFlags::FAST_DEVICE_ACCESS, - memory_types: req.memory_type_bits & self.valid_ash_memory_types, - }, - )?; + let raw = unsafe { self.shared.raw.create_image(&vk_info, None)? }; + let req = unsafe { self.shared.raw.get_image_memory_requirements(raw) }; + + let block = unsafe { + self.mem_allocator.lock().alloc( + &*self.shared, + gpu_alloc::Request { + size: req.size, + align_mask: req.alignment - 1, + usage: gpu_alloc::UsageFlags::FAST_DEVICE_ACCESS, + memory_types: req.memory_type_bits & self.valid_ash_memory_types, + }, + )? + }; - self.shared - .raw - .bind_image_memory(raw, *block.memory(), block.offset())?; + unsafe { + self.shared + .raw + .bind_image_memory(raw, *block.memory(), block.offset())? 
+ }; if let Some(label) = desc.label { - self.shared - .set_object_name(vk::ObjectType::IMAGE, raw, label); + unsafe { + self.shared + .set_object_name(vk::ObjectType::IMAGE, raw, label) + }; } Ok(super::Texture { @@ -931,10 +950,10 @@ impl crate::Device for super::Device { } unsafe fn destroy_texture(&self, texture: super::Texture) { if texture.drop_guard.is_none() { - self.shared.raw.destroy_image(texture.raw, None); + unsafe { self.shared.raw.destroy_image(texture.raw, None) }; } if let Some(block) = texture.block { - self.mem_allocator.lock().dealloc(&*self.shared, block); + unsafe { self.mem_allocator.lock().dealloc(&*self.shared, block) }; } } @@ -964,11 +983,13 @@ impl crate::Device for super::Device { texture.usage }; - let raw = self.shared.raw.create_image_view(&vk_info, None)?; + let raw = unsafe { self.shared.raw.create_image_view(&vk_info, None) }?; if let Some(label) = desc.label { - self.shared - .set_object_name(vk::ObjectType::IMAGE_VIEW, raw, label); + unsafe { + self.shared + .set_object_name(vk::ObjectType::IMAGE_VIEW, raw, label) + }; } let attachment = super::FramebufferAttachment { @@ -993,12 +1014,12 @@ impl crate::Device for super::Device { let mut fbuf_lock = self.shared.framebuffers.lock(); for (key, &raw_fbuf) in fbuf_lock.iter() { if key.attachments.iter().any(|at| at.raw == view.raw) { - self.shared.raw.destroy_framebuffer(raw_fbuf, None); + unsafe { self.shared.raw.destroy_framebuffer(raw_fbuf, None) }; } } fbuf_lock.retain(|key, _| !key.attachments.iter().any(|at| at.raw == view.raw)); } - self.shared.raw.destroy_image_view(view.raw, None); + unsafe { self.shared.raw.destroy_image_view(view.raw, None) }; } unsafe fn create_sampler( @@ -1040,17 +1061,19 @@ impl crate::Device for super::Device { vk_info = vk_info.border_color(conv::map_border_color(color)); } - let raw = self.shared.raw.create_sampler(&vk_info, None)?; + let raw = unsafe { self.shared.raw.create_sampler(&vk_info, None)? 
}; if let Some(label) = desc.label { - self.shared - .set_object_name(vk::ObjectType::SAMPLER, raw, label); + unsafe { + self.shared + .set_object_name(vk::ObjectType::SAMPLER, raw, label) + }; } Ok(super::Sampler { raw }) } unsafe fn destroy_sampler(&self, sampler: super::Sampler) { - self.shared.raw.destroy_sampler(sampler.raw, None); + unsafe { self.shared.raw.destroy_sampler(sampler.raw, None) }; } unsafe fn create_command_encoder( @@ -1061,7 +1084,7 @@ impl crate::Device for super::Device { .queue_family_index(desc.queue.family_index) .flags(vk::CommandPoolCreateFlags::TRANSIENT) .build(); - let raw = self.shared.raw.create_command_pool(&vk_info, None)?; + let raw = unsafe { self.shared.raw.create_command_pool(&vk_info, None)? }; Ok(super::CommandEncoder { raw, @@ -1075,17 +1098,19 @@ impl crate::Device for super::Device { }) } unsafe fn destroy_command_encoder(&self, cmd_encoder: super::CommandEncoder) { - if !cmd_encoder.free.is_empty() { - self.shared - .raw - .free_command_buffers(cmd_encoder.raw, &cmd_encoder.free); - } - if !cmd_encoder.discarded.is_empty() { - self.shared - .raw - .free_command_buffers(cmd_encoder.raw, &cmd_encoder.discarded); + unsafe { + if !cmd_encoder.free.is_empty() { + self.shared + .raw + .free_command_buffers(cmd_encoder.raw, &cmd_encoder.free) + } + if !cmd_encoder.discarded.is_empty() { + self.shared + .raw + .free_command_buffers(cmd_encoder.raw, &cmd_encoder.discarded) + } + self.shared.raw.destroy_command_pool(cmd_encoder.raw, None); } - self.shared.raw.destroy_command_pool(cmd_encoder.raw, None); } unsafe fn create_bind_group_layout( @@ -1224,14 +1249,17 @@ impl crate::Device for super::Device { let vk_info = vk_info.flags(dsl_create_flags); - let raw = self - .shared - .raw - .create_descriptor_set_layout(&vk_info, None)?; + let raw = unsafe { + self.shared + .raw + .create_descriptor_set_layout(&vk_info, None)? 
+ }; if let Some(label) = desc.label { - self.shared - .set_object_name(vk::ObjectType::DESCRIPTOR_SET_LAYOUT, raw, label); + unsafe { + self.shared + .set_object_name(vk::ObjectType::DESCRIPTOR_SET_LAYOUT, raw, label) + }; } Ok(super::BindGroupLayout { @@ -1243,9 +1271,11 @@ impl crate::Device for super::Device { }) } unsafe fn destroy_bind_group_layout(&self, bg_layout: super::BindGroupLayout) { - self.shared - .raw - .destroy_descriptor_set_layout(bg_layout.raw, None); + unsafe { + self.shared + .raw + .destroy_descriptor_set_layout(bg_layout.raw, None) + }; } unsafe fn create_pipeline_layout( @@ -1275,12 +1305,14 @@ impl crate::Device for super::Device { let raw = { profiling::scope!("vkCreatePipelineLayout"); - self.shared.raw.create_pipeline_layout(&vk_info, None)? + unsafe { self.shared.raw.create_pipeline_layout(&vk_info, None)? } }; if let Some(label) = desc.label { - self.shared - .set_object_name(vk::ObjectType::PIPELINE_LAYOUT, raw, label); + unsafe { + self.shared + .set_object_name(vk::ObjectType::PIPELINE_LAYOUT, raw, label) + }; } let mut binding_arrays = BTreeMap::new(); @@ -1304,31 +1336,37 @@ impl crate::Device for super::Device { }) } unsafe fn destroy_pipeline_layout(&self, pipeline_layout: super::PipelineLayout) { - self.shared - .raw - .destroy_pipeline_layout(pipeline_layout.raw, None); + unsafe { + self.shared + .raw + .destroy_pipeline_layout(pipeline_layout.raw, None) + }; } unsafe fn create_bind_group( &self, desc: &crate::BindGroupDescriptor, ) -> Result { - let mut vk_sets = self.desc_allocator.lock().allocate( - &*self.shared, - &desc.layout.raw, - if desc.layout.requires_update_after_bind { - gpu_descriptor::DescriptorSetLayoutCreateFlags::UPDATE_AFTER_BIND - } else { - gpu_descriptor::DescriptorSetLayoutCreateFlags::empty() - }, - &desc.layout.desc_count, - 1, - )?; + let mut vk_sets = unsafe { + self.desc_allocator.lock().allocate( + &*self.shared, + &desc.layout.raw, + if desc.layout.requires_update_after_bind { + 
gpu_descriptor::DescriptorSetLayoutCreateFlags::UPDATE_AFTER_BIND + } else { + gpu_descriptor::DescriptorSetLayoutCreateFlags::empty() + }, + &desc.layout.desc_count, + 1, + )? + }; let set = vk_sets.pop().unwrap(); if let Some(label) = desc.label { - self.shared - .set_object_name(vk::ObjectType::DESCRIPTOR_SET, *set.raw(), label); + unsafe { + self.shared + .set_object_name(vk::ObjectType::DESCRIPTOR_SET, *set.raw(), label) + }; } let mut writes = Vec::with_capacity(desc.entries.len()); @@ -1397,13 +1435,15 @@ impl crate::Device for super::Device { writes.push(write.build()); } - self.shared.raw.update_descriptor_sets(&writes, &[]); + unsafe { self.shared.raw.update_descriptor_sets(&writes, &[]) }; Ok(super::BindGroup { set }) } unsafe fn destroy_bind_group(&self, group: super::BindGroup) { - self.desc_allocator - .lock() - .free(&*self.shared, Some(group.set)); + unsafe { + self.desc_allocator + .lock() + .free(&*self.shared, Some(group.set)) + }; } unsafe fn create_shader_module( @@ -1448,8 +1488,10 @@ impl crate::Device for super::Device { let raw = self.create_shader_module_impl(&spv)?; if let Some(label) = desc.label { - self.shared - .set_object_name(vk::ObjectType::SHADER_MODULE, raw, label); + unsafe { + self.shared + .set_object_name(vk::ObjectType::SHADER_MODULE, raw, label) + }; } Ok(super::ShaderModule::Raw(raw)) @@ -1457,7 +1499,7 @@ impl crate::Device for super::Device { unsafe fn destroy_shader_module(&self, module: super::ShaderModule) { match module { super::ShaderModule::Raw(raw) => { - self.shared.raw.destroy_shader_module(raw, None); + unsafe { self.shared.raw.destroy_shader_module(raw, None) }; } super::ShaderModule::Intermediate { .. } => {} } @@ -1674,33 +1716,37 @@ impl crate::Device for super::Device { let mut raw_vec = { profiling::scope!("vkCreateGraphicsPipelines"); - self.shared - .raw - .create_graphics_pipelines(vk::PipelineCache::null(), &vk_infos, None) - .map_err(|(_, e)| crate::DeviceError::from(e))? 
+ unsafe { + self.shared + .raw + .create_graphics_pipelines(vk::PipelineCache::null(), &vk_infos, None) + .map_err(|(_, e)| crate::DeviceError::from(e)) + }? }; let raw = raw_vec.pop().unwrap(); if let Some(label) = desc.label { - self.shared - .set_object_name(vk::ObjectType::PIPELINE, raw, label); + unsafe { + self.shared + .set_object_name(vk::ObjectType::PIPELINE, raw, label) + }; } if let Some(raw_module) = compiled_vs.temp_raw_module { - self.shared.raw.destroy_shader_module(raw_module, None); + unsafe { self.shared.raw.destroy_shader_module(raw_module, None) }; } if let Some(CompiledStage { temp_raw_module: Some(raw_module), .. }) = compiled_fs { - self.shared.raw.destroy_shader_module(raw_module, None); + unsafe { self.shared.raw.destroy_shader_module(raw_module, None) }; } Ok(super::RenderPipeline { raw }) } unsafe fn destroy_render_pipeline(&self, pipeline: super::RenderPipeline) { - self.shared.raw.destroy_pipeline(pipeline.raw, None); + unsafe { self.shared.raw.destroy_pipeline(pipeline.raw, None) }; } unsafe fn create_compute_pipeline( @@ -1722,26 +1768,30 @@ impl crate::Device for super::Device { let mut raw_vec = { profiling::scope!("vkCreateComputePipelines"); - self.shared - .raw - .create_compute_pipelines(vk::PipelineCache::null(), &vk_infos, None) - .map_err(|(_, e)| crate::DeviceError::from(e))? + unsafe { + self.shared + .raw + .create_compute_pipelines(vk::PipelineCache::null(), &vk_infos, None) + .map_err(|(_, e)| crate::DeviceError::from(e)) + }? 
}; let raw = raw_vec.pop().unwrap(); if let Some(label) = desc.label { - self.shared - .set_object_name(vk::ObjectType::PIPELINE, raw, label); + unsafe { + self.shared + .set_object_name(vk::ObjectType::PIPELINE, raw, label) + }; } if let Some(raw_module) = compiled.temp_raw_module { - self.shared.raw.destroy_shader_module(raw_module, None); + unsafe { self.shared.raw.destroy_shader_module(raw_module, None) }; } Ok(super::ComputePipeline { raw }) } unsafe fn destroy_compute_pipeline(&self, pipeline: super::ComputePipeline) { - self.shared.raw.destroy_pipeline(pipeline.raw, None); + unsafe { self.shared.raw.destroy_pipeline(pipeline.raw, None) }; } unsafe fn create_query_set( @@ -1769,16 +1819,18 @@ impl crate::Device for super::Device { .pipeline_statistics(pipeline_statistics) .build(); - let raw = self.shared.raw.create_query_pool(&vk_info, None)?; + let raw = unsafe { self.shared.raw.create_query_pool(&vk_info, None) }?; if let Some(label) = desc.label { - self.shared - .set_object_name(vk::ObjectType::QUERY_POOL, raw, label); + unsafe { + self.shared + .set_object_name(vk::ObjectType::QUERY_POOL, raw, label) + }; } Ok(super::QuerySet { raw }) } unsafe fn destroy_query_set(&self, set: super::QuerySet) { - self.shared.raw.destroy_query_pool(set.raw, None); + unsafe { self.shared.raw.destroy_query_pool(set.raw, None) }; } unsafe fn create_fence(&self) -> Result { @@ -1786,7 +1838,7 @@ impl crate::Device for super::Device { let mut sem_type_info = vk::SemaphoreTypeCreateInfo::builder().semaphore_type(vk::SemaphoreType::TIMELINE); let vk_info = vk::SemaphoreCreateInfo::builder().push_next(&mut sem_type_info); - let raw = self.shared.raw.create_semaphore(&vk_info, None)?; + let raw = unsafe { self.shared.raw.create_semaphore(&vk_info, None) }?; super::Fence::TimelineSemaphore(raw) } else { super::Fence::FencePool { @@ -1799,7 +1851,7 @@ impl crate::Device for super::Device { unsafe fn destroy_fence(&self, fence: super::Fence) { match fence { 
super::Fence::TimelineSemaphore(raw) => { - self.shared.raw.destroy_semaphore(raw, None); + unsafe { self.shared.raw.destroy_semaphore(raw, None) }; } super::Fence::FencePool { active, @@ -1807,10 +1859,10 @@ impl crate::Device for super::Device { last_completed: _, } => { for (_, raw) in active { - self.shared.raw.destroy_fence(raw, None); + unsafe { self.shared.raw.destroy_fence(raw, None) }; } for raw in free { - self.shared.raw.destroy_fence(raw, None); + unsafe { self.shared.raw.destroy_fence(raw, None) }; } } } @@ -1839,12 +1891,12 @@ impl crate::Device for super::Device { .semaphores(&semaphores) .values(&values); let result = match self.shared.extension_fns.timeline_semaphore { - Some(super::ExtensionFn::Extension(ref ext)) => { + Some(super::ExtensionFn::Extension(ref ext)) => unsafe { ext.wait_semaphores(&vk_info, timeout_ns) - } - Some(super::ExtensionFn::Promoted) => { + }, + Some(super::ExtensionFn::Promoted) => unsafe { self.shared.raw.wait_semaphores(&vk_info, timeout_ns) - } + }, None => unreachable!(), }; match result { @@ -1863,7 +1915,9 @@ impl crate::Device for super::Device { } else { match active.iter().find(|&&(value, _)| value >= wait_value) { Some(&(_, raw)) => { - match self.shared.raw.wait_for_fences(&[raw], true, timeout_ns) { + match unsafe { + self.shared.raw.wait_for_fences(&[raw], true, timeout_ns) + } { Ok(()) => Ok(true), Err(vk::Result::TIMEOUT) => Ok(false), Err(other) => Err(other.into()), @@ -1885,9 +1939,11 @@ impl crate::Device for super::Device { // Renderdoc requires us to give us the pointer that vkInstance _points to_. 
let raw_vk_instance = ash::vk::Handle::as_raw(self.shared.instance.raw.handle()) as *mut *mut _; - let raw_vk_instance_dispatch_table = *raw_vk_instance; - self.render_doc - .start_frame_capture(raw_vk_instance_dispatch_table, ptr::null_mut()) + let raw_vk_instance_dispatch_table = unsafe { *raw_vk_instance }; + unsafe { + self.render_doc + .start_frame_capture(raw_vk_instance_dispatch_table, ptr::null_mut()) + } } #[cfg(not(feature = "renderdoc"))] false @@ -1898,10 +1954,12 @@ impl crate::Device for super::Device { // Renderdoc requires us to give us the pointer that vkInstance _points to_. let raw_vk_instance = ash::vk::Handle::as_raw(self.shared.instance.raw.handle()) as *mut *mut _; - let raw_vk_instance_dispatch_table = *raw_vk_instance; + let raw_vk_instance_dispatch_table = unsafe { *raw_vk_instance }; - self.render_doc - .end_frame_capture(raw_vk_instance_dispatch_table, ptr::null_mut()) + unsafe { + self.render_doc + .end_frame_capture(raw_vk_instance_dispatch_table, ptr::null_mut()) + } } } } diff --git a/wgpu-hal/src/vulkan/instance.rs b/wgpu-hal/src/vulkan/instance.rs index 19d7684eb2..0f828e3a26 100644 --- a/wgpu-hal/src/vulkan/instance.rs +++ b/wgpu-hal/src/vulkan/instance.rs @@ -31,17 +31,17 @@ unsafe extern "system" fn debug_utils_messenger_callback( _ => log::Level::Warn, }; - let cd = &*callback_data_ptr; + let cd = unsafe { &*callback_data_ptr }; let message_id_name = if cd.p_message_id_name.is_null() { Cow::from("") } else { - CStr::from_ptr(cd.p_message_id_name).to_string_lossy() + unsafe { CStr::from_ptr(cd.p_message_id_name) }.to_string_lossy() }; let message = if cd.p_message.is_null() { Cow::from("") } else { - CStr::from_ptr(cd.p_message).to_string_lossy() + unsafe { CStr::from_ptr(cd.p_message) }.to_string_lossy() }; let _ = std::panic::catch_unwind(|| { @@ -56,14 +56,13 @@ unsafe extern "system" fn debug_utils_messenger_callback( }); if cd.queue_label_count != 0 { - let labels = slice::from_raw_parts(cd.p_queue_labels, 
cd.queue_label_count as usize); + let labels = + unsafe { slice::from_raw_parts(cd.p_queue_labels, cd.queue_label_count as usize) }; let names = labels .iter() .flat_map(|dul_obj| { - dul_obj - .p_label_name - .as_ref() - .map(|lbl| CStr::from_ptr(lbl).to_string_lossy()) + unsafe { dul_obj.p_label_name.as_ref() } + .map(|lbl| unsafe { CStr::from_ptr(lbl) }.to_string_lossy()) }) .collect::>(); @@ -73,14 +72,13 @@ unsafe extern "system" fn debug_utils_messenger_callback( } if cd.cmd_buf_label_count != 0 { - let labels = slice::from_raw_parts(cd.p_cmd_buf_labels, cd.cmd_buf_label_count as usize); + let labels = + unsafe { slice::from_raw_parts(cd.p_cmd_buf_labels, cd.cmd_buf_label_count as usize) }; let names = labels .iter() .flat_map(|dul_obj| { - dul_obj - .p_label_name - .as_ref() - .map(|lbl| CStr::from_ptr(lbl).to_string_lossy()) + unsafe { dul_obj.p_label_name.as_ref() } + .map(|lbl| unsafe { CStr::from_ptr(lbl) }.to_string_lossy()) }) .collect::>(); @@ -90,15 +88,13 @@ unsafe extern "system" fn debug_utils_messenger_callback( } if cd.object_count != 0 { - let labels = slice::from_raw_parts(cd.p_objects, cd.object_count as usize); + let labels = unsafe { slice::from_raw_parts(cd.p_objects, cd.object_count as usize) }; //TODO: use color fields of `vk::DebugUtilsLabelExt`? 
let names = labels .iter() .map(|obj_info| { - let name = obj_info - .p_object_name - .as_ref() - .map(|name| CStr::from_ptr(name).to_string_lossy()) + let name = unsafe { obj_info.p_object_name.as_ref() } + .map(|name| unsafe { CStr::from_ptr(name) }.to_string_lossy()) .unwrap_or(Cow::Borrowed("?")); format!( @@ -125,9 +121,9 @@ impl super::Swapchain { profiling::scope!("Swapchain::release_resources"); { profiling::scope!("vkDeviceWaitIdle"); - let _ = device.device_wait_idle(); + let _ = unsafe { device.device_wait_idle() }; }; - device.destroy_fence(self.fence, None); + unsafe { device.destroy_fence(self.fence, None) }; self } } @@ -256,9 +252,8 @@ impl super::Instance { | vk::DebugUtilsMessageTypeFlagsEXT::PERFORMANCE, ) .pfn_user_callback(Some(debug_utils_messenger_callback)); - let messenger = extension - .create_debug_utils_messenger(&vk_info, None) - .unwrap(); + let messenger = + unsafe { extension.create_debug_utils_messenger(&vk_info, None) }.unwrap(); Some(super::DebugUtils { extension, messenger, @@ -484,7 +479,7 @@ impl crate::Instance for super::Instance { unsafe fn init(desc: &crate::InstanceDescriptor) -> Result { use crate::auxil::cstr_from_bytes_until_nul; - let entry = match ash::Entry::load() { + let entry = match unsafe { ash::Entry::load() } { Ok(entry) => entry, Err(err) => { log::info!("Missing Vulkan entry points: {:?}", err); @@ -595,22 +590,24 @@ impl crate::Instance for super::Instance { .enabled_layer_names(&str_pointers[..layers.len()]) .enabled_extension_names(&str_pointers[layers.len()..]); - entry.create_instance(&create_info, None).map_err(|e| { + unsafe { entry.create_instance(&create_info, None) }.map_err(|e| { log::warn!("create_instance: {:?}", e); crate::InstanceError })? 
}; - Self::from_raw( - entry, - vk_instance, - driver_api_version, - android_sdk_version, - extensions, - desc.flags, - has_nv_optimus, - Some(Box::new(())), // `Some` signals that wgpu-hal is in charge of destroying vk_instance - ) + unsafe { + Self::from_raw( + entry, + vk_instance, + driver_api_version, + android_sdk_version, + extensions, + desc.flags, + has_nv_optimus, + Some(Box::new(())), // `Some` signals that wgpu-hal is in charge of destroying vk_instance + ) + } } unsafe fn create_surface( @@ -635,7 +632,7 @@ impl crate::Instance for super::Instance { (Rwh::Win32(handle), _) => { use winapi::um::libloaderapi::GetModuleHandleW; - let hinstance = GetModuleHandleW(std::ptr::null()); + let hinstance = unsafe { GetModuleHandleW(std::ptr::null()) }; self.create_surface_from_hwnd(hinstance as *mut _, handle.hwnd) } #[cfg(target_os = "macos")] @@ -655,13 +652,13 @@ impl crate::Instance for super::Instance { } unsafe fn destroy_surface(&self, surface: super::Surface) { - surface.functor.destroy_surface(surface.raw, None); + unsafe { surface.functor.destroy_surface(surface.raw, None) }; } unsafe fn enumerate_adapters(&self) -> Vec> { use crate::auxil::db; - let raw_devices = match self.shared.raw.enumerate_physical_devices() { + let raw_devices = match unsafe { self.shared.raw.enumerate_physical_devices() } { Ok(devices) => devices, Err(err) => { log::error!("enumerate_adapters: {}", err); @@ -708,9 +705,9 @@ impl crate::Surface for super::Surface { let old = self .swapchain .take() - .map(|sc| sc.release_resources(&device.shared.raw)); + .map(|sc| unsafe { sc.release_resources(&device.shared.raw) }); - let swapchain = device.create_swapchain(self, config, old)?; + let swapchain = unsafe { device.create_swapchain(self, config, old)? 
}; self.swapchain = Some(swapchain); Ok(()) @@ -718,8 +715,8 @@ impl crate::Surface for super::Surface { unsafe fn unconfigure(&mut self, device: &super::Device) { if let Some(sc) = self.swapchain.take() { - let swapchain = sc.release_resources(&device.shared.raw); - swapchain.functor.destroy_swapchain(swapchain.raw, None); + let swapchain = unsafe { sc.release_resources(&device.shared.raw) }; + unsafe { swapchain.functor.destroy_swapchain(swapchain.raw, None) }; } } @@ -748,23 +745,22 @@ impl crate::Surface for super::Surface { } // will block if no image is available - let (index, suboptimal) = - match sc - .functor + let (index, suboptimal) = match unsafe { + sc.functor .acquire_next_image(sc.raw, timeout_ns, vk::Semaphore::null(), sc.fence) - { - Ok(pair) => pair, - Err(error) => { - return match error { - vk::Result::TIMEOUT => Ok(None), - vk::Result::NOT_READY | vk::Result::ERROR_OUT_OF_DATE_KHR => { - Err(crate::SurfaceError::Outdated) - } - vk::Result::ERROR_SURFACE_LOST_KHR => Err(crate::SurfaceError::Lost), - other => Err(crate::DeviceError::from(other).into()), + } { + Ok(pair) => pair, + Err(error) => { + return match error { + vk::Result::TIMEOUT => Ok(None), + vk::Result::NOT_READY | vk::Result::ERROR_OUT_OF_DATE_KHR => { + Err(crate::SurfaceError::Outdated) } + vk::Result::ERROR_SURFACE_LOST_KHR => Err(crate::SurfaceError::Lost), + other => Err(crate::DeviceError::from(other).into()), } - }; + } + }; // special case for Intel Vulkan returning bizzare values (ugh) if sc.device.vendor_id == crate::auxil::db::intel::VENDOR && index > 0x100 { @@ -773,14 +769,9 @@ impl crate::Surface for super::Surface { let fences = &[sc.fence]; - sc.device - .raw - .wait_for_fences(fences, true, !0) - .map_err(crate::DeviceError::from)?; - sc.device - .raw - .reset_fences(fences) + unsafe { sc.device.raw.wait_for_fences(fences, true, !0) } .map_err(crate::DeviceError::from)?; + unsafe { sc.device.raw.reset_fences(fences) }.map_err(crate::DeviceError::from)?; let texture 
= super::SurfaceTexture { index, diff --git a/wgpu-hal/src/vulkan/mod.rs b/wgpu-hal/src/vulkan/mod.rs index fd1266de37..f8ffbc718e 100644 --- a/wgpu-hal/src/vulkan/mod.rs +++ b/wgpu-hal/src/vulkan/mod.rs @@ -589,10 +589,11 @@ impl crate::Queue for Queue { } => { fence_raw = match free.pop() { Some(raw) => raw, - None => self - .device - .raw - .create_fence(&vk::FenceCreateInfo::builder(), None)?, + None => unsafe { + self.device + .raw + .create_fence(&vk::FenceCreateInfo::builder(), None)? + }, }; active.push((value, fence_raw)); } @@ -620,9 +621,11 @@ impl crate::Queue for Queue { vk_info = vk_info.signal_semaphores(&signal_semaphores[..signal_count]); profiling::scope!("vkQueueSubmit"); - self.device - .raw - .queue_submit(self.raw, &[vk_info.build()], fence_raw)?; + unsafe { + self.device + .raw + .queue_submit(self.raw, &[vk_info.build()], fence_raw)? + }; Ok(()) } @@ -645,13 +648,13 @@ impl crate::Queue for Queue { let suboptimal = { profiling::scope!("vkQueuePresentKHR"); - self.swapchain_fn - .queue_present(self.raw, &vk_info) - .map_err(|error| match error { + unsafe { self.swapchain_fn.queue_present(self.raw, &vk_info) }.map_err(|error| { + match error { vk::Result::ERROR_OUT_OF_DATE_KHR => crate::SurfaceError::Outdated, vk::Result::ERROR_SURFACE_LOST_KHR => crate::SurfaceError::Lost, _ => crate::DeviceError::from(error).into(), - })? + } + })? }; if suboptimal { log::warn!("Suboptimal present of frame {}", texture.index); From f56a393d62a2f740771381704271c049eb479ca6 Mon Sep 17 00:00:00 2001 From: Erich Gubler Date: Mon, 19 Sep 2022 10:14:32 -0400 Subject: [PATCH 5/6] chore: separate new `unsafe` ops into blocks Unsafe operations can be exhausting to validate by themselves. Therefore, it's often interesting to separate these out so we can justify each individual operation, and let a human reader accumulate and drop supporting safety context in the smallest increments necessary. 
To that end, this commit can pave the way for future work where we may do something like enable [`clippy::undocumented_unsafe_blocks`], which calls out `unsafe` blocks that do not have a `SAFETY` comment immediately above them. This commit only separates the operations in `unsafe` blocks I added in this same PR; I'm deliberately leaving existing `unsafe` blocks alone, ATM. [`clippy::undocumented_unsafe_blocks`]: https://rust-lang.github.io/rust-clippy/stable/index.html#undocumented_unsafe_blocks --- wgpu-core/src/track/mod.rs | 9 +++------ wgpu-core/src/track/texture.rs | 18 ++++++++---------- wgpu-hal/src/gles/device.rs | 33 ++++++++++++++++----------------- wgpu-hal/src/gles/egl.rs | 19 ++++++++----------- wgpu-hal/src/metal/surface.rs | 5 +++-- wgpu-hal/src/vulkan/device.rs | 5 ++++- 6 files changed, 42 insertions(+), 47 deletions(-) diff --git a/wgpu-core/src/track/mod.rs b/wgpu-core/src/track/mod.rs index 6dcc2674b8..fe44bd9067 100644 --- a/wgpu-core/src/track/mod.rs +++ b/wgpu-core/src/track/mod.rs @@ -445,12 +445,9 @@ impl ResourceMetadataProvider<'_, A> { } ResourceMetadataProvider::Indirect { metadata } => { metadata.tracker_assert_in_bounds(index); - (unsafe { *metadata.epochs.get_unchecked(index) }, unsafe { - metadata - .ref_counts - .get_unchecked(index) - .clone() - .unwrap_unchecked() + (unsafe { *metadata.epochs.get_unchecked(index) }, { + let ref_count = unsafe { metadata.ref_counts.get_unchecked(index) }; + unsafe { ref_count.clone().unwrap_unchecked() } }) } ResourceMetadataProvider::Resource { epoch } => { diff --git a/wgpu-core/src/track/texture.rs b/wgpu-core/src/track/texture.rs index d5f88bdee8..99e8119c4b 100644 --- a/wgpu-core/src/track/texture.rs +++ b/wgpu-core/src/track/texture.rs @@ -291,9 +291,10 @@ impl TextureUsageScope { self.tracker_assert_in_bounds(index); scope.tracker_assert_in_bounds(index); + let texture_data = unsafe { texture_data_from_texture(storage, index32) }; unsafe { insert_or_merge( - 
texture_data_from_texture(storage, index32), + texture_data, &mut self.set, &mut self.metadata, index32, @@ -359,9 +360,10 @@ impl TextureUsageScope { self.tracker_assert_in_bounds(index); + let texture_data = unsafe { texture_data_from_texture(storage, index32) }; unsafe { insert_or_merge( - texture_data_from_texture(storage, index32), + texture_data, &mut self.set, &mut self.metadata, index32, @@ -467,13 +469,8 @@ impl TextureTracker { self.tracker_assert_in_bounds(index); - unsafe { - self.metadata - .ref_counts - .get_unchecked(index) - .as_ref() - .unwrap_unchecked() - } + let ref_count = unsafe { self.metadata.ref_counts.get_unchecked(index) }; + unsafe { ref_count.as_ref().unwrap_unchecked() } } /// Inserts a single texture and a state into the resource tracker. @@ -683,9 +680,10 @@ impl TextureTracker { if unsafe { !scope.metadata.owned.get(index).unwrap_unchecked() } { continue; } + let texture_data = unsafe { texture_data_from_texture(storage, index32) }; unsafe { insert_or_barrier_update( - texture_data_from_texture(storage, index32), + texture_data, Some(&mut self.start_set), &mut self.end_set, &mut self.metadata, diff --git a/wgpu-hal/src/gles/device.rs b/wgpu-hal/src/gles/device.rs index 116e228f4c..fa4802f9d8 100644 --- a/wgpu-hal/src/gles/device.rs +++ b/wgpu-hal/src/gles/device.rs @@ -173,7 +173,8 @@ impl super::Device { if gl.supports_debug() { //TODO: remove all transmutes from `object_label` // https://github.com/grovesNL/glow/issues/186 - unsafe { gl.object_label(glow::SHADER, mem::transmute(raw), label) }; + let name = unsafe { mem::transmute(raw) }; + unsafe { gl.object_label(glow::SHADER, name, label) }; } unsafe { gl.shader_source(raw, shader) }; @@ -276,7 +277,8 @@ impl super::Device { #[cfg(not(target_arch = "wasm32"))] if let Some(label) = label { if gl.supports_debug() { - unsafe { gl.object_label(glow::PROGRAM, mem::transmute(program), Some(label)) }; + let name = unsafe { mem::transmute(program) }; + unsafe { 
gl.object_label(glow::PROGRAM, name, Some(label)) }; } } @@ -363,12 +365,8 @@ impl super::Device { return Err(crate::DeviceError::Lost.into()); } super::BindingRegister::Textures | super::BindingRegister::Images => { - unsafe { - gl.uniform_1_i32( - gl.get_uniform_location(program, name).as_ref(), - slot as _, - ) - }; + let location = unsafe { gl.get_uniform_location(program, name) }; + unsafe { gl.uniform_1_i32(location.as_ref(), slot as _) }; } } } @@ -516,7 +514,8 @@ impl crate::Device for super::Device { #[cfg(not(target_arch = "wasm32"))] if let Some(label) = desc.label { if gl.supports_debug() { - unsafe { gl.object_label(glow::BUFFER, mem::transmute(raw), Some(label)) }; + let name = unsafe { mem::transmute(raw) }; + unsafe { gl.object_label(glow::BUFFER, name, Some(label)) }; } } @@ -660,9 +659,8 @@ impl crate::Device for super::Device { #[cfg(not(target_arch = "wasm32"))] if let Some(label) = desc.label { if gl.supports_debug() { - unsafe { - gl.object_label(glow::RENDERBUFFER, mem::transmute(raw), Some(label)) - }; + let name = unsafe { mem::transmute(raw) }; + unsafe { gl.object_label(glow::RENDERBUFFER, name, Some(label)) }; } } @@ -728,7 +726,8 @@ impl crate::Device for super::Device { #[cfg(not(target_arch = "wasm32"))] if let Some(label) = desc.label { if gl.supports_debug() { - unsafe { gl.object_label(glow::TEXTURE, mem::transmute(raw), Some(label)) }; + let name = unsafe { mem::transmute(raw) }; + unsafe { gl.object_label(glow::TEXTURE, name, Some(label)) }; } } @@ -876,7 +875,8 @@ impl crate::Device for super::Device { #[cfg(not(target_arch = "wasm32"))] if let Some(label) = desc.label { if gl.supports_debug() { - unsafe { gl.object_label(glow::SAMPLER, mem::transmute(raw), Some(label)) }; + let name = unsafe { mem::transmute(raw) }; + unsafe { gl.object_label(glow::SAMPLER, name, Some(label)) }; } } @@ -1170,9 +1170,8 @@ impl crate::Device for super::Device { if let Some(label) = desc.label { temp_string.clear(); let _ = write!(temp_string, 
"{}[{}]", label, i); - unsafe { - gl.object_label(glow::QUERY, mem::transmute(query), Some(&temp_string)) - }; + let name = unsafe { mem::transmute(query) }; + unsafe { gl.object_label(glow::QUERY, name, Some(&temp_string)) }; } } queries.push(query); diff --git a/wgpu-hal/src/gles/egl.rs b/wgpu-hal/src/gles/egl.rs index d813ae522e..b66047c1ec 100644 --- a/wgpu-hal/src/gles/egl.rs +++ b/wgpu-hal/src/gles/egl.rs @@ -738,8 +738,9 @@ impl crate::Instance for Instance { && client_ext_str.contains("EGL_KHR_debug") { log::info!("Enabling EGL debug output"); - let function: EglDebugMessageControlFun = unsafe { - std::mem::transmute(egl.get_proc_address("eglDebugMessageControlKHR").unwrap()) + let function: EglDebugMessageControlFun = { + let addr = egl.get_proc_address("eglDebugMessageControlKHR").unwrap(); + unsafe { std::mem::transmute(addr) } }; let attributes = [ EGL_DEBUG_MSG_CRITICAL_KHR as egl::Attrib, @@ -911,9 +912,10 @@ impl super::Adapter { pub unsafe fn new_external( fun: impl FnMut(&str) -> *const ffi::c_void, ) -> Option> { + let context = unsafe { glow::Context::from_loader_function(fun) }; unsafe { Self::expose(AdapterContext { - glow: Mutex::new(glow::Context::from_loader_function(fun)), + glow: Mutex::new(context), egl: None, }) } @@ -1238,14 +1240,9 @@ impl crate::Surface for Surface { .destroy_surface(self.egl.display, surface) .unwrap(); if let Some(window) = wl_window { - let wl_egl_window_destroy: libloading::Symbol = unsafe { - self.wsi - .library - .as_ref() - .expect("unsupported window") - .get(b"wl_egl_window_destroy") - } - .unwrap(); + let library = self.wsi.library.as_ref().expect("unsupported window"); + let wl_egl_window_destroy: libloading::Symbol = + unsafe { library.get(b"wl_egl_window_destroy") }.unwrap(); unsafe { wl_egl_window_destroy(window) }; } } diff --git a/wgpu-hal/src/metal/surface.rs b/wgpu-hal/src/metal/surface.rs index 8d86415ef3..fffad30f03 100644 --- a/wgpu-hal/src/metal/surface.rs +++ b/wgpu-hal/src/metal/surface.rs @@ 
-83,8 +83,9 @@ impl super::Surface { delegate: Option<&HalManagedMetalLayerDelegate>, ) -> Self { let view = view as *mut Object; - let render_layer = unsafe { - mem::transmute::<_, &mtl::MetalLayerRef>(Self::get_metal_layer(view, delegate)) + let render_layer = { + let layer = unsafe { Self::get_metal_layer(view, delegate) }; + unsafe { mem::transmute::<_, &mtl::MetalLayerRef>(layer) } } .to_owned(); let _: *mut c_void = msg_send![view, retain]; diff --git a/wgpu-hal/src/vulkan/device.rs b/wgpu-hal/src/vulkan/device.rs index 98243219d0..084be72de9 100644 --- a/wgpu-hal/src/vulkan/device.rs +++ b/wgpu-hal/src/vulkan/device.rs @@ -48,13 +48,16 @@ impl super::DeviceShared { .collect(); &buffer_vec }; + + let name = unsafe { CStr::from_bytes_with_nul_unchecked(name_bytes) }; + let _result = unsafe { extension.debug_utils_set_object_name( self.raw.handle(), &vk::DebugUtilsObjectNameInfoEXT::builder() .object_type(object_type) .object_handle(object.as_raw()) - .object_name(CStr::from_bytes_with_nul_unchecked(name_bytes)), + .object_name(name), ) }; } From f411fa7c1dbf23174237a7635b62e4376e10efcb Mon Sep 17 00:00:00 2001 From: Erich Gubler Date: Wed, 21 Sep 2022 18:01:27 -0400 Subject: [PATCH 6/6] docs: add `unsafe_op_in_unsafe_fn` note to `README.md` --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 807e686ca3..2a9bc5f0fd 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -229,6 +229,7 @@ both `raw_window_handle::HasRawWindowHandle` and `raw_window_handle::HasRawDispl - Don't use `PhantomData` for `IdentityManager`'s `Input` type. 
 By @jimblandy in [#2972](https://github.com/gfx-rs/wgpu/pull/2972) - Changed Naga variant in ShaderSource to `Cow<'static, Module>`, to allow loading global variables by @daxpedda in [#2903](https://github.com/gfx-rs/wgpu/pull/2903) - Updated the maximum binding index to match the WebGPU specification by @nical in [#2957](https://github.com/gfx-rs/wgpu/pull/2957) +- Add the `unsafe_op_in_unsafe_fn` rustc lint as a warning in the entire workspace. By @ErichDonGubler in [#3044](https://github.com/gfx-rs/wgpu/pull/3044). #### Metal