Add a way for Queue::submit() to signal a Vulkan vk::Semaphore allocated outside of wgpu #6813

Merged · 1 commit · Jan 17, 2025
20 changes: 19 additions & 1 deletion wgpu-core/src/resource.rs
@@ -8,7 +8,10 @@ use crate::{
},
global::Global,
hal_api::HalApi,
id::{AdapterId, BufferId, CommandEncoderId, DeviceId, SurfaceId, TextureId, TextureViewId},
id::{
AdapterId, BufferId, CommandEncoderId, DeviceId, QueueId, SurfaceId, TextureId,
TextureViewId,
},
init_tracker::{BufferInitTracker, TextureInitTracker},
lock::{rank, Mutex, RwLock},
resource_log,
@@ -1388,6 +1391,21 @@ impl Global {
hal_command_encoder_callback(None)
}
}

/// # Safety
///
/// - The raw queue handle must not be manually destroyed
pub unsafe fn queue_as_hal<A: HalApi, F, R>(&self, id: QueueId, hal_queue_callback: F) -> R
where
F: FnOnce(Option<&A::Queue>) -> R,
{
profiling::scope!("Queue::as_hal");

let queue = self.hub.queues.get(id);
let hal_queue = queue.raw().as_any().downcast_ref();

hal_queue_callback(hal_queue)
}
}

/// A texture that has been marked as destroyed and is staged for actual deletion soon.
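For illustration, a minimal caller-side sketch of how the new `queue_as_hal` hook might be reached from an embedding application; the `with_raw_vulkan_queue` helper, the `global`/`queue_id` values, and going through `wgpu_hal::api::Vulkan` are assumptions, not part of this PR:

```rust
// Hypothetical caller-side sketch; `global` and `queue_id` are owned by the
// embedding application, and the helper name is made up for illustration.
fn with_raw_vulkan_queue(global: &wgpu_core::global::Global, queue_id: wgpu_core::id::QueueId) {
    unsafe {
        global.queue_as_hal::<wgpu_hal::api::Vulkan, _, _>(queue_id, |hal_queue| {
            // `hal_queue` is None if this queue does not belong to the Vulkan backend.
            if let Some(queue) = hal_queue {
                // `queue` is a &wgpu_hal::vulkan::Queue; the wgpu-hal changes below add
                // `raw_device()` and `add_signal_semaphore()` on it.
                let _device: &ash::Device = queue.raw_device();
            }
        });
    }
}
```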
1 change: 1 addition & 0 deletions wgpu-hal/src/vulkan/adapter.rs
@@ -1982,6 +1982,7 @@ impl super::Adapter {
device: Arc::clone(&shared),
family_index,
relay_semaphores: Mutex::new(relay_semaphores),
signal_semaphores: Mutex::new((Vec::new(), Vec::new())),
};

let mem_allocator = {
24 changes: 24 additions & 0 deletions wgpu-hal/src/vulkan/mod.rs
@@ -36,6 +36,7 @@ use std::{
ffi::{CStr, CString},
fmt, mem,
num::NonZeroU32,
ops::DerefMut,
sync::Arc,
};

@@ -765,6 +766,7 @@ pub struct Queue {
device: Arc<DeviceShared>,
family_index: u32,
relay_semaphores: Mutex<RelaySemaphores>,
signal_semaphores: Mutex<(Vec<vk::Semaphore>, Vec<u64>)>,
}

impl Drop for Queue {
@@ -1216,6 +1218,15 @@ impl crate::Queue for Queue {
signal_values.push(!0);
}

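// Append any externally registered signal semaphores (see `Queue::add_signal_semaphore`
// below) so that this submission signals them as well; `Vec::append` drains the pending lists.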
let mut guards = self.signal_semaphores.lock();
let (ref mut pending_signal_semaphores, ref mut pending_signal_semaphore_values) =
guards.deref_mut();
assert!(pending_signal_semaphores.len() == pending_signal_semaphore_values.len());
if !pending_signal_semaphores.is_empty() {
signal_semaphores.append(pending_signal_semaphores);
signal_values.append(pending_signal_semaphore_values);
}

// In order for submissions to be strictly ordered, we encode a dependency between each submission
// using a pair of semaphores. This adds a wait if it is needed, and signals the next semaphore.
let semaphore_state = self.relay_semaphores.lock().advance(&self.device)?;
@@ -1344,6 +1355,19 @@
}
}

impl Queue {
pub fn raw_device(&self) -> &ash::Device {
&self.device.raw
}

pub fn add_signal_semaphore(&self, semaphore: vk::Semaphore, semaphore_value: Option<u64>) {
let mut guards = self.signal_semaphores.lock();
let (ref mut semaphores, ref mut semaphore_values) = guards.deref_mut();
semaphores.push(semaphore);
semaphore_values.push(semaphore_value.unwrap_or(!0));
}
}

/// Maps
///
/// - VK_ERROR_OUT_OF_HOST_MEMORY
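Putting the pieces together, a rough sketch of the intended application-side flow: create a `vk::Semaphore` outside of wgpu and ask the next `Queue::submit()` to signal it. The function name and error handling are hypothetical; only `raw_device()` and `add_signal_semaphore()` come from this PR, and `hal_queue` is assumed to have been obtained through the `queue_as_hal` hook above.

```rust
use ash::vk;

// Rough sketch, assuming `hal_queue: &wgpu_hal::vulkan::Queue` from queue_as_hal.
// The semaphore is created by the application, outside of wgpu, and must outlive
// the submission that signals it.
unsafe fn register_external_semaphore(
    hal_queue: &wgpu_hal::vulkan::Queue,
) -> Result<vk::Semaphore, vk::Result> {
    let device = hal_queue.raw_device();

    // Create a plain (binary) semaphore; for a timeline semaphore, a
    // vk::SemaphoreTypeCreateInfo would be chained into the create info instead.
    let semaphore = device.create_semaphore(&vk::SemaphoreCreateInfo::default(), None)?;

    // Ask the next Queue::submit() to signal it. `None` means a binary semaphore
    // (the value is ignored); `Some(value)` would be the timeline value to signal.
    hal_queue.add_signal_semaphore(semaphore, None);

    Ok(semaphore)
}
```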