diff --git a/.cargo/config b/.cargo/config.toml similarity index 100% rename from .cargo/config rename to .cargo/config.toml diff --git a/.deny.toml b/.deny.toml new file mode 100644 index 0000000000..5613b6ac26 --- /dev/null +++ b/.deny.toml @@ -0,0 +1,37 @@ +[bans] +multiple-versions = "deny" +skip-tree = [ + { name = "cts_runner" }, + { name = "dummy" }, + { name = "player" }, + { name = "run-wasm" }, + { name = "wgpu-info" }, +] +wildcards = "deny" + +[licenses] +allow = [ + "Apache-2.0", + "Apache-2.0 WITH LLVM-exception", + "BSD-2-Clause", + "BSD-3-Clause", + "CC0-1.0", + "ISC", + "MIT", + "MPL-2.0", + "Unicode-DFS-2016", + "Zlib", +] + +[sources] +allow-git = [ + "https://github.com/grovesNL/glow", +] +unknown-registry = "deny" +unknown-git = "deny" +required-git-spec = "rev" + +[sources.allow-org] +github = [ + "gfx-rs" +] diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 1a1cb04a00..6c287b927c 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -2,122 +2,96 @@ name: CI on: push: - branches: [master, staging] + branches: ["*"] tags: [v0.*] pull_request: env: RUST_BACKTRACE: 1 - RUST_VERSION: 1.62 + RUST_VERSION: 1.64 + PKG_CONFIG_ALLOW_CROSS: 1 # allow android to work + RUSTFLAGS: --cfg=web_sys_unstable_apis -D warnings + RUSTDOCFLAGS: -Dwarnings + CACHE_SUFFIX: c # cache busting # We distinguish the following kinds of builds: -# - local: build for the same target as we compile on, and do local tests -# - other: build without testing, e.g. cross-build +# - native: build for the same target as we compile on # - web: build for the Web # - em: build for the Emscripten +# For build time and size optimization we disable debug +# entirely on clippy jobs and reduce it to line-numbers +# only for ones where we run tests. +# +# Additionally, we disable incremental builds entirely +# as our caching system doesn't actually cache our crates. +# It adds overhead to the build and another point of failure. 
+ jobs: - build: + check-msrv: strategy: fail-fast: false matrix: include: # Windows - name: Windows x86_64 - os: windows-2019 + os: windows-2022 target: x86_64-pc-windows-msvc - tool: clippy - kind: local - backends: dx12 # dx11 - nextest_url: https://get.nexte.st/latest/windows-tar + kind: native - - name: Windows Nightly x86_64 - os: windows-2019 - target: x86_64-pc-windows-msvc - tool: check - kind: other - # MacOS - name: MacOS x86_64 - os: macos-11 + os: macos-12 target: x86_64-apple-darwin - tool: clippy - # Mac has no software runners, so don't run tests - kind: other + kind: native - name: MacOS aarch64 - os: macos-11 + os: macos-12 target: aarch64-apple-darwin - tool: check - # Mac has no software runners, so don't run tests - kind: other - + kind: native + # IOS - name: IOS aarch64 - os: macos-11 + os: macos-12 target: aarch64-apple-ios - tool: clippy - kind: other - + kind: native # Linux - name: Linux x86_64 - os: ubuntu-20.04 + os: ubuntu-22.04 target: x86_64-unknown-linux-gnu - tool: clippy - kind: local - backends: vulkan gl - nextest_url: https://get.nexte.st/latest/linux + kind: native - name: Linux aarch64 - os: ubuntu-20.04 + os: ubuntu-22.04 target: aarch64-unknown-linux-gnu - tool: check - kind: other + kind: native - - name: Linux Nightly x86_64 - os: ubuntu-20.04 - target: x86_64-unknown-linux-gnu - tool: check - kind: other - - # Android - name: Android aarch64 - os: ubuntu-20.04 + os: ubuntu-22.04 target: aarch64-linux-android - tool: clippy - kind: other + kind: native - # WebGPU/WebGL - name: WebAssembly - os: ubuntu-20.04 + os: ubuntu-22.04 target: wasm32-unknown-unknown - tool: clippy kind: web - name: Emscripten - os: ubuntu-20.04 + os: ubuntu-22.04 target: wasm32-unknown-emscripten - tool: clippy kind: em - name: Check ${{ matrix.name }} + name: Clippy ${{ matrix.name }} runs-on: ${{ matrix.os }} - env: - PKG_CONFIG_ALLOW_CROSS: 1 # allow android to work - RUSTFLAGS: --cfg=web_sys_unstable_apis -D warnings - RUSTDOCFLAGS: -Dwarnings 
- steps: - name: checkout repo - uses: actions/checkout@v2 + uses: actions/checkout@v3 - # Only run clippy on MSRV - - name: install rust stable - if: matrix.tool == 'clippy' + - name: install rust ${{ env.RUST_VERSION }} uses: actions-rs/toolchain@v1 with: toolchain: ${{ env.RUST_VERSION }} @@ -126,118 +100,217 @@ jobs: override: true components: clippy - # Other builds can use nightly - - name: install rust nightly - if: matrix.tool != 'clippy' - uses: actions-rs/toolchain@v1 - with: - toolchain: nightly - target: ${{ matrix.target }} - profile: minimal - override: true + - name: disable debug + shell: bash + run: | + mkdir -p .cargo + echo """ + [profile.dev] + incremental = false + debug = false" >> .cargo/config.toml - name: caching - uses: Swatinem/rust-cache@v1 + uses: Swatinem/rust-cache@v2 with: - key: ${{ matrix.target }}-b # suffix for cache busting - - - name: download nextest - if: matrix.kind == 'local' - shell: bash - run: | - curl -LsSf ${{ matrix.nextest_url }} | tar xzf - -C ${CARGO_HOME:-~/.cargo}/bin + key: clippy-${{ matrix.target }}-${{ matrix.kind }}-${{ env.CACHE_SUFFIX }} - name: add android apk to path - if: matrix.os == 'ubuntu-20.04' && matrix.target == 'aarch64-linux-android' + if: matrix.target == 'aarch64-linux-android' run: | echo "$ANDROID_HOME/ndk-bundle/toolchains/llvm/prebuilt/linux-x86_64/bin" >> $GITHUB_PATH - - name: install llvmpipe, lavapipe, and vulkan sdk - if: matrix.os == 'ubuntu-20.04' && matrix.target != 'aarch64-linux-android' && matrix.kind == 'local' - run: | - sudo apt-get update -y -qq - - # llvmpipe/lavapipe - sudo add-apt-repository ppa:oibaf/graphics-drivers -y - - # vulkan sdk - wget -qO - https://packages.lunarg.com/lunarg-signing-key-pub.asc | sudo apt-key add - - sudo wget -qO /etc/apt/sources.list.d/lunarg-vulkan-focal.list https://packages.lunarg.com/vulkan/lunarg-vulkan-focal.list - - sudo apt-get update - sudo apt install -y libegl1-mesa libgl1-mesa-dri libxcb-xfixes0-dev mesa-vulkan-drivers vulkan-sdk 
- - # We enable line numbers for panics, but that's it - - name: disable debug - shell: bash - run: | - mkdir -p .cargo - echo """\n[profile.dev] - debug = 1" >> .cargo/config.toml - - name: check web if: matrix.kind == 'web' + shell: bash run: | - # build with no features - cargo ${{matrix.tool}} --target ${{ matrix.target }} -p wgpu --no-default-features - - # build examples - cargo ${{matrix.tool}} --target ${{ matrix.target }} -p wgpu --examples + set -e - # build with features - cargo ${{matrix.tool}} --target ${{ matrix.target }} -p wgpu --features glsl,spirv + # build for WebGPU + cargo clippy --target ${{ matrix.target }} -p wgpu --tests --features glsl,spirv # build for WebGL - cargo ${{matrix.tool}} --target ${{ matrix.target }} -p wgpu --features webgl + cargo clippy --target ${{ matrix.target }} -p wgpu --tests --features webgl,glsl,spirv # build docs - cargo doc --target ${{ matrix.target }} -p wgpu --no-deps + cargo doc --target ${{ matrix.target }} -p wgpu --no-deps --features glsl,spirv - name: check em if: matrix.kind == 'em' + shell: bash run: | + set -e + # build for Emscripten/WebGL - cargo ${{matrix.tool}} --target ${{ matrix.target }} -p wgpu -p wgpu-hal --no-default-features --features webgl,emscripten + cargo clippy --target ${{ matrix.target }} -p wgpu -p wgpu-hal \ + --no-default-features --features webgl,emscripten + + # build cube example + cargo clippy --target ${{ matrix.target }} --example cube --features webgl,emscripten # build raw-gles example - cargo ${{matrix.tool}} --target ${{ matrix.target }} --example raw-gles --features webgl,emscripten + cargo clippy --target ${{ matrix.target }} --example raw-gles --features webgl,emscripten - name: check native - if: matrix.kind == 'local' || matrix.kind == 'other' + if: matrix.kind == 'native' + shell: bash run: | + set -e + # check with no features - cargo ${{matrix.tool}} --target ${{ matrix.target }} -p wgpu -p wgpu-core -p wgpu-info -p player --no-default-features + cargo clippy 
--target ${{ matrix.target }} -p wgpu -p wgpu-core -p wgpu-info -p player --no-default-features # check with all features - # explicitly don't mention wgpu-hal so that --all-features don't apply to it - cargo ${{matrix.tool}} --target ${{ matrix.target }} -p wgpu -p wgpu-core -p wgpu-info -p player --examples --tests --all-features + cargo clippy --target ${{ matrix.target }} -p wgpu -p wgpu-core -p wgpu-info -p player --tests --all-features # build docs - cargo doc --target ${{ matrix.target }} --no-deps cargo doc --target ${{ matrix.target }} -p wgpu -p wgpu-core -p wgpu-info -p player --all-features --no-deps - - name: local tests - if: matrix.kind == 'local' + gpu-test: + strategy: + fail-fast: false + matrix: + include: + # Windows + - name: Windows x86_64 + os: windows-2022 + backends: dx12 + + # Linux + - name: Linux x86_64 + os: ubuntu-22.04 + backends: vulkan gl + + name: Test ${{ matrix.name }} + runs-on: ${{ matrix.os }} + + steps: + - name: checkout repo + uses: actions/checkout@v3 + + - name: install rust stable + uses: actions-rs/toolchain@v1 + with: + toolchain: nightly + profile: minimal + override: true + components: llvm-tools-preview + + - name: latest cargo-nextest + uses: taiki-e/install-action@nextest + - name: install cargo-llvm-cov + uses: taiki-e/install-action@cargo-llvm-cov + + - name: install swiftshader + if: matrix.os == 'ubuntu-22.04' + shell: bash + run: | + set -e + + mkdir -p swiftshader + curl -LsSf https://github.com/gfx-rs/ci-build/releases/latest/download/swiftshader-linux-x86_64.tar.xz | tar -xf - -C swiftshader + + echo "LD_LIBRARY_PATH=$PWD/swiftshader" >> $GITHUB_ENV + + - name: install llvmpipe, vulkan sdk + if: matrix.os == 'ubuntu-22.04' + shell: bash + run: | + set -e + + sudo apt-get update -y -qq + + # vulkan sdk + wget -qO - https://packages.lunarg.com/lunarg-signing-key-pub.asc | sudo apt-key add - + sudo wget -qO /etc/apt/sources.list.d/lunarg-vulkan-jammy.list 
https://packages.lunarg.com/vulkan/lunarg-vulkan-jammy.list + + sudo apt-get update + sudo apt install -y libegl1-mesa libgl1-mesa-dri libxcb-xfixes0-dev vulkan-sdk + + - name: disable debug + shell: bash + run: | + mkdir -p .cargo + echo """ + [profile.dev] + incremental = false + debug = 1" >> .cargo/config.toml + + - name: caching + uses: Swatinem/rust-cache@v2 + with: + key: test-${{ matrix.os }}-${{ env.CACHE_SUFFIX }} + + - name: run wgpu-info + shell: bash + run: | + set -e + + cargo llvm-cov run --bin wgpu-info --no-report + + - name: run tests shell: bash run: | - # run wgpu-info - cargo run --bin wgpu-info --features angle,vulkan-portability - # run unit and player tests - cargo nextest run -p wgpu-types -p wgpu-hal -p wgpu-core -p player --no-fail-fast - # run native tests + set -e + for backend in ${{ matrix.backends }}; do echo "======= NATIVE TESTS $backend ======"; - WGPU_BACKEND=$backend cargo nextest run -p wgpu --no-fail-fast - # Test that we catch overflows in `--release` builds too. 
- WGPU_BACKEND=$backend cargo nextest run --release -p wgpu --no-fail-fast + WGPU_BACKEND=$backend cargo llvm-cov nextest -p wgpu -p wgpu-types -p wgpu-hal -p wgpu-core -p player --no-fail-fast --no-report done + - name: generate coverage report + shell: bash + run: | + set -e + + cargo llvm-cov report --lcov --output-path lcov.info + + - name: upload coverage report to codecov + uses: codecov/codecov-action@v3 + with: + files: lcov.info + + doctest: + name: Doctest + runs-on: ubuntu-22.04 + + steps: + - name: checkout repo + uses: actions/checkout@v3 + + - name: install rust stable + uses: actions-rs/toolchain@v1 + with: + toolchain: stable + profile: minimal + override: true + + - name: disable debug + shell: bash + run: | + mkdir -p .cargo + echo """ + [profile.dev] + incremental = false + debug = 1" >> .cargo/config.toml + + - name: caching + uses: Swatinem/rust-cache@v2 + with: + key: clippy-${{ matrix.target }}-${{ matrix.kind }}-${{ env.CACHE_SUFFIX }} + + - name: run doctests + shell: bash + run: | + set -e + + cargo test --doc + fmt: name: Format runs-on: ubuntu-latest steps: - name: checkout repo - uses: actions/checkout@v2 + uses: actions/checkout@v3 - name: install rust uses: actions-rs/toolchain@v1 @@ -251,19 +324,63 @@ jobs: run: | cargo fmt -- --check - deno: - name: Deno + check-msrv-cts_runner: + name: Clippy cts_runner runs-on: ubuntu-latest steps: - name: checkout repo - uses: actions/checkout@v2 + uses: actions/checkout@v3 - - name: install rust + - name: install rust ${{ env.RUST_VERSION }} uses: actions-rs/toolchain@v1 with: profile: minimal - toolchain: stable + toolchain: ${{ env.RUST_VERSION }} + override: true + components: clippy + + - name: disable debug + shell: bash + run: | + mkdir -p .cargo + echo """ + [profile.dev] + incremental = false + debug = 1" >> .cargo/config.toml + + - name: caching + uses: Swatinem/rust-cache@v2 + with: + key: cts_runner-${{ env.CACHE_SUFFIX }} - name: build Deno run: | - cargo check --manifest-path 
cts_runner/Cargo.toml + cargo clippy --manifest-path cts_runner/Cargo.toml + + cargo-deny-check-advisories: + name: "Run `cargo deny check advisories`" + runs-on: ubuntu-latest + steps: + - name: checkout repo + uses: actions/checkout@v3 + + - name: Run `cargo deny check` + uses: EmbarkStudios/cargo-deny-action@v1 + with: + command: check advisories + arguments: --all-features --workspace + rust-version: ${{ env.RUST_VERSION }} + + cargo-deny-check-rest: + name: "Run `cargo deny check`" + runs-on: ubuntu-latest + steps: + - name: checkout repo + uses: actions/checkout@v3 + + - name: Run `cargo deny check` + uses: EmbarkStudios/cargo-deny-action@v1 + with: + command: check bans licenses sources + arguments: --all-features --workspace + rust-version: ${{ env.RUST_VERSION }} diff --git a/.github/workflows/cts.yml b/.github/workflows/cts.yml index 8fd788c7dd..332fc7fb6d 100644 --- a/.github/workflows/cts.yml +++ b/.github/workflows/cts.yml @@ -9,6 +9,7 @@ on: env: RUST_BACKTRACE: 1 + RUST_VERSION: 1.64 jobs: cts: @@ -46,12 +47,13 @@ jobs: cd cts git checkout $(cat ../wgpu/cts_runner/revision.txt) - - name: install rust + - name: install rust ${{ env.RUST_VERSION }} uses: actions-rs/toolchain@v1 with: - toolchain: stable + toolchain: ${{ env.RUST_VERSION }} target: ${{ matrix.target }} profile: minimal + override: true - name: caching uses: Swatinem/rust-cache@v1 @@ -91,4 +93,4 @@ jobs: echo "=== Running $test ==="; DENO_WEBGPU_BACKEND=$backend cargo run --manifest-path ../wgpu/cts_runner/Cargo.toml --frozen -- ./tools/run_deno --verbose "$test"; done - done \ No newline at end of file + done diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml index df62787002..8e2c9885db 100644 --- a/.github/workflows/docs.yml +++ b/.github/workflows/docs.yml @@ -30,6 +30,8 @@ jobs: - name: Build the docs (nightly) run: | cargo +nightly doc --no-deps --lib + env: + RUSTDOCFLAGS: --cfg docsrs - name: Build the docs (stable) run: cargo +stable doc --no-deps --lib @@ 
-42,4 +44,4 @@ jobs: FOLDER: target/doc REPOSITORY_NAME: gfx-rs/wgpu-rs.github.io BRANCH: master - TARGET_FOLDER: doc + TARGET_FOLDER: doc diff --git a/.github/workflows/lazy.yaml b/.github/workflows/lazy.yaml deleted file mode 100644 index 352e57cc8d..0000000000 --- a/.github/workflows/lazy.yaml +++ /dev/null @@ -1,32 +0,0 @@ -# Lazy jobs running on master post merges. -name: Lazy -on: - push: - branches: [master] - -jobs: - coverage: - name: Coverage - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v2 - - uses: actions-rs/toolchain@v1 - with: - toolchain: stable - - run: sudo apt-get update -y -qq - - run: sudo add-apt-repository ppa:ubuntu-x-swat/updates -y - - run: sudo apt-get update - - run: sudo apt install -y libxcb-xfixes0-dev mesa-vulkan-drivers - - name: Generate report - uses: actions-rs/tarpaulin@v0.1 - with: - args: '-p wgpu-core -p wgpu-hal' - - name: Upload to codecov.io - uses: codecov/codecov-action@v1 - with: - token: ${{ secrets.CODECOV_TOKEN }} - - name: Archive code coverage results - uses: actions/upload-artifact@v1 - with: - name: code-coverage-report - path: cobertura.xml diff --git a/CHANGELOG.md b/CHANGELOG.md index 432e5f13c9..030960c49c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -34,22 +34,54 @@ Bottom level categories: - DX11 - GLES - WebGPU -- Enscripten +- Emscripten - Hal --> ## Unreleased +### Major Changes + +#### Surface Capabilities API + +The various surface capability functions were combined into a single call that gives you all the capabilities. 
+ +```diff +- let formats = surface.get_supported_formats(&adapter); +- let present_modes = surface.get_supported_present_modes(&adapter); +- let alpha_modes = surface.get_supported_alpha_modes(&adapter); ++ let caps = surface.get_capabilities(&adapter); ++ let formats = caps.formats; ++ let present_modes = caps.present_modes; ++ let alpha_modes = caps.alpha_modes; +``` + +Additionally `Surface::get_default_config` now returns an Option and returns None if the surface isn't supported by the adapter. + +```diff +- let config = surface.get_default_config(&adapter); ++ let config = surface.get_default_config(&adapter).expect("Surface unsupported by adapter"); +``` + ### Changes #### General - Convert all `Default` Implementations on Enums to `derive(Default)` - Implement `Default` for `CompositeAlphaMode` +- Improve compute shader validation error message. By @haraldreingruber in [#3139](https://github.com/gfx-rs/wgpu/pull/3139) +- New downlevel feature `UNRESTRICTED_INDEX_BUFFER` to indicate support for using `INDEX` together with other non-copy/map usages (unsupported on WebGL). By @Wumpf in [#3157](https://github.com/gfx-rs/wgpu/pull/3157) +- Combine `Surface::get_supported_formats`, `Surface::get_supported_present_modes`, and `Surface::get_supported_alpha_modes` into `Surface::get_capabilities` and `SurfaceCapabilities`. By @cwfitzgerald in [#3157](https://github.com/gfx-rs/wgpu/pull/3157) +- Make `Surface::get_default_config` return an Option to prevent panics. By @cwfitzgerald in [#3157](https://github.com/gfx-rs/wgpu/pull/3157) #### WebGPU + - Implement `queue_validate_write_buffer` by @jinleili in [#3098](https://github.com/gfx-rs/wgpu/pull/3098) +#### GLES + +- Browsers that support `OVR_multiview2` now report the `MULTIVIEW` feature by @expenses in [#3121](https://github.com/gfx-rs/wgpu/pull/3121). 
+ ### Added/New Features #### General @@ -58,38 +90,80 @@ Bottom level categories: - Add the `"wgsl"` feature, to enable WGSL shaders in `wgpu-core` and `wgpu`. Enabled by default in `wgpu`. By @daxpedda in [#2890](https://github.com/gfx-rs/wgpu/pull/2890). - Implement `Clone` for `ShaderSource` and `ShaderModuleDescriptor` in `wgpu`. By @daxpedda in [#3086](https://github.com/gfx-rs/wgpu/pull/3086). - Add `get_default_config` for `Surface` to simplify user creation of `SurfaceConfiguration`. By @jinleili in [#3034](https://github.com/gfx-rs/wgpu/pull/3034) +- Native adapters can now use MSAA x2 and x8 if it's supported , previously only x1 and x4 were supported . By @39ali in [3140](https://github.com/gfx-rs/wgpu/pull/3140) #### GLES - Surfaces support now `TextureFormat::Rgba8Unorm` and (non-web only) `TextureFormat::Bgra8Unorm`. By @Wumpf in [#3070](https://github.com/gfx-rs/wgpu/pull/3070) +- Support alpha to coverage. By @Wumpf in [#3156](https://github.com/gfx-rs/wgpu/pull/3156) + +#### WebGPU + +- Add `MULTISAMPLE_X2`, `MULTISAMPLE_X4` and `MULTISAMPLE_X8` to `TextureFormatFeatureFlags`. By @39ali in [3140](https://github.com/gfx-rs/wgpu/pull/3140) ### Bug Fixes #### General +- Update ndk-sys to v0.4.1+23.1.7779620, to fix checksum failures. By @jimblandy in [#3232](https://github.com/gfx-rs/wgpu/pull/3232). - Bother to free the `hal::Api::CommandBuffer` when a `wgpu_core::command::CommandEncoder` is dropped. By @jimblandy in [#3069](https://github.com/gfx-rs/wgpu/pull/3069). - Fixed the mipmap example by adding the missing WRITE_TIMESTAMP_INSIDE_PASSES feature. By @Olaroll in [#3081](https://github.com/gfx-rs/wgpu/pull/3081). 
- Avoid panicking in some interactions with invalid resources by @nical in (#3094)[https://github.com/gfx-rs/wgpu/pull/3094] - Fixed an integer overflow in `copy_texture_to_texture` by @nical [#3090](https://github.com/gfx-rs/wgpu/pull/3090) +- Remove `wgpu_types::Features::DEPTH24PLUS_STENCIL8`, making `wgpu::TextureFormat::Depth24PlusStencil8` available on all backends. By @Healthire in (#3151)[https://github.com/gfx-rs/wgpu/pull/3151] +- Fix an integer overflow in `queue_write_texture` by @nical in (#3146)[https://github.com/gfx-rs/wgpu/pull/3146] +- Make `RenderPassCompatibilityError` and `CreateShaderModuleError` not so huge. By @jimblandy in (#3226)[https://github.com/gfx-rs/wgpu/pull/3226] +- Check for invalid bitflag bits in wgpu-core and allow them to be captured/replayed by @nical in (#3229)[https://github.com/gfx-rs/wgpu/pull/3229] #### WebGPU + - Use `log` instead of `println` in hello example by @JolifantoBambla in [#2858](https://github.com/gfx-rs/wgpu/pull/2858) #### GLES - Fixed WebGL not displaying srgb targets correctly if a non-screen filling viewport was previously set. By @Wumpf in [#3093](https://github.com/gfx-rs/wgpu/pull/3093) +- Fix disallowing multisampling for float textures if otherwise supported. By @Wumpf in [#3183](https://github.com/gfx-rs/wgpu/pull/3183) + +#### deno-webgpu + +- Let `setVertexBuffer` and `setIndexBuffer` calls on + `GPURenderBundleEncoder` throw an error if the `size` argument is + zero, rather than treating that as "until the end of the buffer". + By @jimblandy in [#3171](https://github.com/gfx-rs/wgpu/pull/3171) + +#### Emscripten + +- Let the wgpu examples `framework.rs` compile again under Emscripten. 
By @jimblandy in [#3246](https://github.com/gfx-rs/wgpu/pull/3246) ### Examples + - Log adapter info in hello example on wasm target by @JolifantoBambla in [#2858](https://github.com/gfx-rs/wgpu/pull/2858) ### Testing/Internal -- Update the `minimum supported rust version` to 1.62 +- Update the `minimum supported rust version` to 1.64 +- Use cargo 1.64 workspace inheritance feature. By @jinleili in [#3107](https://github.com/gfx-rs/wgpu/pull/3107) +- Move `ResourceMetadata` into its own module. By @jimblandy in [#3213](https://github.com/gfx-rs/wgpu/pull/3213) #### Vulkan - Don't use a pointer to a local copy of a `PhysicalDeviceDriverProperties` struct after it has gone out of scope. In fact, don't make a local copy at all. Introduce a helper function for building `CStr`s from C character arrays, and remove some `unsafe` blocks. By @jimblandy in [#3076](https://github.com/gfx-rs/wgpu/pull/3076). + +## wgpu-0.14.2 (2022-11-28) + +### Bug Fixes + +- Fix incorrect offset in `get_mapped_range` by @nical in [#3233](https://github.com/gfx-rs/wgpu/pull/3233) + + +## wgpu-0.14.1 (2022-11-02) + +### Bug Fixes + +- Make `wgpu::TextureFormat::Depth24PlusStencil8` available on all backends by making the feature unconditionally available and the feature unneeded to use the format. By @Healthire and @cwfitzgerald in [#3165](https://github.com/gfx-rs/wgpu/pull/3165) + + ## wgpu-0.14.0 (2022-10-05) ### Major Changes @@ -181,6 +255,7 @@ both `raw_window_handle::HasRawWindowHandle` and `raw_window_handle::HasRawDispl - Report Apple M2 gpu as integrated. 
By @i509VCB [#3036](https://github.com/gfx-rs/wgpu/pull/3036) #### WebGPU + - When called in a web worker, `Context::init()` now uses `web_sys::WorkerGlobalContext` to create a `wgpu::Instance` instead of trying to access the unavailable `web_sys::Window` by @JolifantoBambla in [#2858](https://github.com/gfx-rs/wgpu/pull/2858) ### Changes @@ -197,6 +272,7 @@ both `raw_window_handle::HasRawWindowHandle` and `raw_window_handle::HasRawDispl - Don't use `PhantomData` for `IdentityManager`'s `Input` type. By @jimblandy in [#2972](https://github.com/gfx-rs/wgpu/pull/2972) - Changed Naga variant in ShaderSource to `Cow<'static, Module>`, to allow loading global variables by @daxpedda in [#2903](https://github.com/gfx-rs/wgpu/pull/2903) - Updated the maximum binding index to match the WebGPU specification by @nical in [#2957](https://github.com/gfx-rs/wgpu/pull/2957) +- Add `unsafe_op_in_unsafe_fn` to Clippy lints in the entire workspace. By @ErichDonGubler in [#3044](https://github.com/gfx-rs/wgpu/pull/3044). #### Metal @@ -309,6 +385,7 @@ Added items to the public API - Validate that map_async's range is not negative by @nical in [#2938](https://github.com/gfx-rs/wgpu/pull/2938) - Fix calculation/validation of layer/mip ranges in create_texture_view by @nical in [#2955](https://github.com/gfx-rs/wgpu/pull/2955) - Validate the sample count and mip level in `copy_texture_to_buffer` by @nical in [#2958](https://github.com/gfx-rs/wgpu/pull/2958) +- Expose the cause of the error in the `map_async` callback in [#2939](https://github.com/gfx-rs/wgpu/pull/2939) #### DX12 @@ -335,6 +412,7 @@ Added items to the public API - Update present_mode docs as most of them don't automatically fall back to Fifo anymore. 
by @Elabajaba in [#2855](https://github.com/gfx-rs/wgpu/pull/2855) #### Hal + - Document safety requirements for `Adapter::from_external` in gles hal by @i509VCB in [#2863](https://github.com/gfx-rs/wgpu/pull/2863) - Make `AdapterContext` a publicly accessible type in the gles hal by @i509VCB in [#2870](https://github.com/gfx-rs/wgpu/pull/2870) diff --git a/Cargo.lock b/Cargo.lock index a95cefa901..021fdf6d78 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -93,6 +93,17 @@ version = "4.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7a40729d2133846d9ed0ea60a8b9541bccddab49cd30f0715a1da672fe9a2524" +[[package]] +name = "async-trait" +version = "0.1.57" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "76464446b8bc32758d7e88ee1a804d9914cd9b1cb264c029899680b0be29826f" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "atty" version = "0.2.14" @@ -120,12 +131,27 @@ dependencies = [ "safemem", ] +[[package]] +name = "base64" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b41b7ea54a0c9d92199de89e20e58d49f02f8e699814ef3fdf266f6f748d15c7" + [[package]] name = "base64" version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "904dfeac50f3cdaba28fc6f57fdcddb75f49ed61346676a78c4ffe55877802fd" +[[package]] +name = "base64-simd" +version = "0.6.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "278c7ba87265587c4823cf1b2fdf57834151540b2e509574adb03627f8c7f22d" +dependencies = [ + "simd-abstraction", +] + [[package]] name = "bit-set" version = "0.5.3" @@ -147,16 +173,6 @@ version = "1.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" -[[package]] -name = "bitflags_serde_shim" -version = "0.2.2" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "25c3d626f0280ec39b33a6fc5c6c1067432b4c41e94aee40ded197a6649bf025" -dependencies = [ - "bitflags", - "serde", -] - [[package]] name = "block" version = "0.1.6" @@ -195,6 +211,12 @@ version = "1.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610" +[[package]] +name = "bytes" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ec8a7b6a70fde80372154c65702f00a0f56f3e1c36abbc6c440484be248856db" + [[package]] name = "cache-padded" version = "1.2.0" @@ -331,6 +353,12 @@ dependencies = [ "web-sys", ] +[[package]] +name = "convert_case" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6245d59a3e82a7fc217c5828a6692dbc6dfb63a0c8c90495621f7b9d79704a0e" + [[package]] name = "core-foundation" version = "0.9.3" @@ -416,6 +444,20 @@ dependencies = [ "winapi", ] +[[package]] +name = "cts_runner" +version = "0.1.0" +dependencies = [ + "deno_console", + "deno_core", + "deno_url", + "deno_web", + "deno_webgpu", + "deno_webidl", + "termcolor", + "tokio", +] + [[package]] name = "cty" version = "0.2.2" @@ -479,6 +521,113 @@ dependencies = [ "enum_primitive", ] +[[package]] +name = "deno_console" +version = "0.69.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "02cd684bd0097101aa8b411dac1e955ded1bc9c0945a453b607af929ca33b380" +dependencies = [ + "deno_core", +] + +[[package]] +name = "deno_core" +version = "0.151.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "390bdf983f9f20d403b09894ce4d9baeb980fa5faa33bf859c47ffa729abd157" +dependencies = [ + "anyhow", + "deno_ops", + "futures", + "indexmap", + "libc", + "log", + "once_cell", + "parking_lot 0.12.1", + "pin-project", + "serde", + "serde_json", + "serde_v8", + 
"sourcemap", + "url", + "v8", +] + +[[package]] +name = "deno_ops" +version = "0.29.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "73e5775de06dc4589c43fa5d81d962f8db9640ccf214291625300e6bf6f3e806" +dependencies = [ + "once_cell", + "proc-macro-crate", + "proc-macro2", + "quote", + "regex", + "syn", +] + +[[package]] +name = "deno_url" +version = "0.69.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fcc823c3e01d24a2a55e8e9b23fcdcfdf376c039a24b3e3571b9b17630f05186" +dependencies = [ + "deno_core", + "serde", + "serde_repr", + "urlpattern", +] + +[[package]] +name = "deno_web" +version = "0.100.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "204620cb1ce3ec06b74d51fb3370c4fcb25d34d42b0557b6e97bbe84ea64e770" +dependencies = [ + "async-trait", + "base64-simd", + "deno_core", + "encoding_rs", + "flate2", + "serde", + "tokio", + "uuid", +] + +[[package]] +name = "deno_webgpu" +version = "0.63.0" +dependencies = [ + "deno_core", + "serde", + "tokio", + "wgpu-core", + "wgpu-types", +] + +[[package]] +name = "deno_webidl" +version = "0.69.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a3bbfcb5416924c4b7ed50514d6577d8a87a61772a043daabe00d81734f5cb07" +dependencies = [ + "deno_core", +] + +[[package]] +name = "derive_more" +version = "0.99.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4fb810d30a7c1953f91334de7244731fc3f3c10d7fe163338a35b9f640960321" +dependencies = [ + "convert_case", + "proc-macro2", + "quote", + "rustc_version 0.4.0", + "syn", +] + [[package]] name = "devserver_lib" version = "0.4.1" @@ -508,7 +657,7 @@ checksum = "9ea835d29036a4087793836fa931b08837ad5e957da9e23886b29586fb9b6650" [[package]] name = "dummy" -version = "0.1.0" +version = "0.14.0" dependencies = [ "wgpu-core", ] @@ -527,6 +676,21 @@ dependencies = [ "wio", ] +[[package]] +name = 
"either" +version = "1.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "90e5c1c8368803113bf0c9584fc495a58b86dc8a29edbf8fe877d21d9507e797" + +[[package]] +name = "encoding_rs" +version = "0.8.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9852635589dc9f9ea1b6fe9f05b50ef208c85c834a562f0c6abb1c475736ec2b" +dependencies = [ + "cfg-if", +] + [[package]] name = "enum_primitive" version = "0.1.1" @@ -632,6 +796,15 @@ version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "aa9a19cbb55df58761df49b23516a86d432839add4af60fc256da840f66ed35b" +[[package]] +name = "form_urlencoded" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a9c384f161156f5260c24a097c56119f9be8c798586aecc13afbcbe7b7e26bf8" +dependencies = [ + "percent-encoding", +] + [[package]] name = "freetype-rs" version = "0.26.0" @@ -654,12 +827,58 @@ dependencies = [ "pkg-config", ] +[[package]] +name = "fslock" +version = "0.1.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "57eafdd0c16f57161105ae1b98a1238f97645f2f588438b2949c99a2af9616bf" +dependencies = [ + "libc", + "winapi", +] + +[[package]] +name = "futures" +version = "0.3.24" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f21eda599937fba36daeb58a22e8f5cee2d14c4a17b5b7739c7c8e5e3b8230c" +dependencies = [ + "futures-channel", + "futures-core", + "futures-executor", + "futures-io", + "futures-sink", + "futures-task", + "futures-util", +] + +[[package]] +name = "futures-channel" +version = "0.3.24" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "30bdd20c28fadd505d0fd6712cdfcb0d4b5648baf45faef7f852afb2399bb050" +dependencies = [ + "futures-core", + "futures-sink", +] + [[package]] name = "futures-core" version = "0.3.24" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "4e5aa3de05362c3fb88de6531e6296e85cde7739cccad4b9dfeeb7f6ebce56bf" +[[package]] +name = "futures-executor" +version = "0.3.24" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ff63c23854bee61b6e9cd331d523909f238fc7636290b96826e9cfa5faa00ab" +dependencies = [ + "futures-core", + "futures-task", + "futures-util", +] + [[package]] name = "futures-intrusive" version = "0.4.0" @@ -692,6 +911,47 @@ dependencies = [ "waker-fn", ] +[[package]] +name = "futures-macro" +version = "0.3.24" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "42cd15d1c7456c04dbdf7e88bcd69760d74f3a798d6444e16974b505b0e62f17" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "futures-sink" +version = "0.3.24" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "21b20ba5a92e727ba30e72834706623d94ac93a725410b6a6b6fbc1b07f7ba56" + +[[package]] +name = "futures-task" +version = "0.3.24" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a6508c467c73851293f390476d4491cf4d227dbabcd4170f3bb6044959b294f1" + +[[package]] +name = "futures-util" +version = "0.3.24" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "44fb6cb1be61cc1d2e43b262516aafcf63b241cffdb1d3fa115f91d9c7b09c90" +dependencies = [ + "futures-channel", + "futures-core", + "futures-io", + "futures-macro", + "futures-sink", + "futures-task", + "memchr", + "pin-project-lite", + "pin-utils", + "slab", +] + [[package]] name = "fxhash" version = "0.2.1" @@ -743,7 +1003,7 @@ checksum = "518faa5064866338b013ff9b2350dc318e14cc4fcd6cb8206d7e7c9886c98815" [[package]] name = "glow" version = "0.11.2" -source = "git+https://github.com/grovesNL/glow/?rev=c8a011fcd57a5c68cc917ed394baa484bdefc909#c8a011fcd57a5c68cc917ed394baa484bdefc909" +source = 
"git+https://github.com/grovesNL/glow?rev=c8a011fcd57a5c68cc917ed394baa484bdefc909#c8a011fcd57a5c68cc917ed394baa484bdefc909" dependencies = [ "js-sys", "slotmap", @@ -906,6 +1166,22 @@ version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b9e0384b61958566e926dc50660321d12159025e767c18e043daf26b70104c39" +[[package]] +name = "idna" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e14ddfc70884202db2244c223200c204c2bda1bc6e0998d11b5e024d657209e6" +dependencies = [ + "unicode-bidi", + "unicode-normalization", +] + +[[package]] +name = "if_chain" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cb56e1aa765b4b4f3aadfab769793b7087bb03a4ea4920644a6d238e2df5b9ed" + [[package]] name = "indexmap" version = "1.9.1" @@ -1091,7 +1367,7 @@ dependencies = [ [[package]] name = "naga" version = "0.10.0" -source = "git+https://github.com/gfx-rs/naga?rev=c52d9102#c52d91023d43092323615fcc746162e478033f26" +source = "git+https://github.com/gfx-rs/naga?rev=e7fc8e6#e7fc8e64f2f23397b149217ecce6e123c5aa5092" dependencies = [ "bit-set", "bitflags", @@ -1167,9 +1443,9 @@ dependencies = [ [[package]] name = "ndk-sys" -version = "0.4.0" +version = "0.4.1+23.1.7779620" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21d83ec9c63ec5bf950200a8e508bdad6659972187b625469f58ef8c08e29046" +checksum = "3cf2aae958bd232cac5069850591667ad422d263686d75b52a065f9badeee5a3" dependencies = [ "jni-sys", ] @@ -1224,6 +1500,16 @@ dependencies = [ "autocfg", ] +[[package]] +name = "num_cpus" +version = "1.13.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "19e64526ebdee182341572e50e9ad03965aa510cd94427a4549448f285e957a1" +dependencies = [ + "hermit-abi", + "libc", +] + [[package]] name = "num_enum" version = "0.5.7" @@ -1361,12 +1647,38 @@ version = 
"0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5be167a7af36ee22fe3115051bc51f6e6c7054c9348e28deb4f49bd6f705a315" +[[package]] +name = "pin-project" +version = "1.0.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ad29a609b6bcd67fee905812e544992d216af9d755757c05ed2d0e15a74c6ecc" +dependencies = [ + "pin-project-internal", +] + +[[package]] +name = "pin-project-internal" +version = "1.0.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "069bdb1e05adc7a8990dce9cc75370895fbe4e3d58b9b73bf1aee56359344a55" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "pin-project-lite" version = "0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e0a7ae3ac2f1173085d398531c705756c94a4c56843785df85a60c1a0afac116" +[[package]] +name = "pin-utils" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" + [[package]] name = "pkg-config" version = "0.3.25" @@ -1375,7 +1687,7 @@ checksum = "1df8c4ec4b0627e53bdf214615ad287367e482558cf84b109250b37464dc03ae" [[package]] name = "player" -version = "0.1.0" +version = "0.14.0" dependencies = [ "env_logger", "log", @@ -1583,7 +1895,7 @@ dependencies = [ [[package]] name = "run-wasm" -version = "0.1.0" +version = "0.14.0" dependencies = [ "cargo-run-wasm", ] @@ -1600,6 +1912,24 @@ version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "08d43f7aa6b08d49f382cde6a7982047c3426db949b1424bc4b7ec9ae12c6ce2" +[[package]] +name = "rustc_version" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "138e3e0acb6c9fb258b19b67cb8abd63c00679d2851805ea151465464fe9030a" +dependencies = [ + "semver 0.9.0", +] + +[[package]] +name = "rustc_version" +version = 
"0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bfa0f585226d2e68097d4f95d113b15b83a82e819ab25717ec0590d9584ef366" +dependencies = [ + "semver 1.0.14", +] + [[package]] name = "ryu" version = "1.0.11" @@ -1645,6 +1975,27 @@ dependencies = [ "tiny-skia", ] +[[package]] +name = "semver" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d7eb9ef2c18661902cc47e535f9bc51b78acd254da71d375c2f6720d9a40403" +dependencies = [ + "semver-parser", +] + +[[package]] +name = "semver" +version = "1.0.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e25dfac463d778e353db5be2449d1cce89bd6fd23c9f1ea21310ce6e5a1b29c4" + +[[package]] +name = "semver-parser" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3" + [[package]] name = "serde" version = "1.0.145" @@ -1654,6 +2005,15 @@ dependencies = [ "serde_derive", ] +[[package]] +name = "serde_bytes" +version = "0.11.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cfc50e8183eeeb6178dcb167ae34a8051d63535023ae38b5d8d12beae193d37b" +dependencies = [ + "serde", +] + [[package]] name = "serde_derive" version = "1.0.145" @@ -1671,11 +2031,37 @@ version = "1.0.85" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e55a28e3aaef9d5ce0506d0a14dbba8054ddc7e499ef522dd8b26859ec9d4a44" dependencies = [ + "indexmap", "itoa", "ryu", "serde", ] +[[package]] +name = "serde_repr" +version = "0.1.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1fe39d9fbb0ebf5eb2c7cb7e2a47e4f462fad1379f1166b8ae49ad9eae89a7ca" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "serde_v8" +version = "0.62.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "b1f30310f753d4b1347acdd669a30b8e6208029cfbb28d3d91012b19333eeff1" +dependencies = [ + "bytes", + "derive_more", + "serde", + "serde_bytes", + "smallvec", + "v8", +] + [[package]] name = "servo-fontconfig" version = "0.5.1" @@ -1707,6 +2093,21 @@ dependencies = [ "libc", ] +[[package]] +name = "signal-hook-registry" +version = "1.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e51e73328dc4ac0c7ccbda3a494dfa03df1de2f46018127f60c693f2648455b0" +dependencies = [ + "libc", +] + +[[package]] +name = "simd-abstraction" +version = "0.6.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c2880f3f7b392823ee65bbcc681961cd8e698c6a30e91ab9b4eef1f9c6c226d8" + [[package]] name = "slab" version = "0.4.7" @@ -1750,6 +2151,32 @@ dependencies = [ "wayland-protocols", ] +[[package]] +name = "socket2" +version = "0.4.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "02e2d2db9033d13a1567121ddd7a095ee144db4e1ca1b1bda3419bc0da294ebd" +dependencies = [ + "libc", + "winapi", +] + +[[package]] +name = "sourcemap" +version = "6.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6e031f2463ecbdd5f34c950f89f5c1e1032f22c0f8e3dc4bdb2e8b6658cf61eb" +dependencies = [ + "base64 0.11.0", + "if_chain", + "lazy_static", + "regex", + "rustc_version 0.2.3", + "serde", + "serde_json", + "url", +] + [[package]] name = "spirv" version = "0.2.0+1.5.4" @@ -1851,6 +2278,52 @@ dependencies = [ "bytemuck", ] +[[package]] +name = "tinyvec" +version = "1.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "87cc5ceb3875bb20c2890005a4e226a4651264a5c75edb2421b52861a0a0cb50" +dependencies = [ + "tinyvec_macros", +] + +[[package]] +name = "tinyvec_macros" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "cda74da7e1a664f795bb1f8a87ec406fb89a02522cf6e50620d016add6dbbf5c" + +[[package]] +name = "tokio" +version = "1.21.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a9e03c497dc955702ba729190dc4aac6f2a0ce97f913e5b1b5912fc5039d9099" +dependencies = [ + "autocfg", + "bytes", + "libc", + "memchr", + "mio", + "num_cpus", + "parking_lot 0.12.1", + "pin-project-lite", + "signal-hook-registry", + "socket2", + "tokio-macros", + "winapi", +] + +[[package]] +name = "tokio-macros" +version = "1.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9724f9a975fb987ef7a3cd9be0350edcbe130698af5b8f7a631e23d42d052484" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "toml" version = "0.5.9" @@ -1860,12 +2333,68 @@ dependencies = [ "serde", ] +[[package]] +name = "unic-char-property" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a8c57a407d9b6fa02b4795eb81c5b6652060a15a7903ea981f3d723e6c0be221" +dependencies = [ + "unic-char-range", +] + +[[package]] +name = "unic-char-range" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0398022d5f700414f6b899e10b8348231abf9173fa93144cbc1a43b9793c1fbc" + +[[package]] +name = "unic-common" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "80d7ff825a6a654ee85a63e80f92f054f904f21e7d12da4e22f9834a4aaa35bc" + +[[package]] +name = "unic-ucd-ident" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e230a37c0381caa9219d67cf063aa3a375ffed5bf541a452db16e744bdab6987" +dependencies = [ + "unic-char-property", + "unic-char-range", + "unic-ucd-version", +] + +[[package]] +name = "unic-ucd-version" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"96bd2f2237fe450fcd0a1d2f5f4e91711124f7857ba2e964247776ebeeb7b0c4" +dependencies = [ + "unic-common", +] + +[[package]] +name = "unicode-bidi" +version = "0.3.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "099b7128301d285f79ddd55b9a83d5e6b9e97c92e0ea0daebee7263e932de992" + [[package]] name = "unicode-ident" version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dcc811dc4066ac62f84f11307873c4850cb653bfa9b1719cee2bd2204a4bc5dd" +[[package]] +name = "unicode-normalization" +version = "0.1.22" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c5713f0fc4b5db668a2ac63cdb7bb4469d8c9fed047b1d0292cc7b0ce2ba921" +dependencies = [ + "tinyvec", +] + [[package]] name = "unicode-segmentation" version = "1.10.0" @@ -1884,6 +2413,54 @@ version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f962df74c8c05a667b5ee8bcf162993134c104e96440b663c8daa176dc772d8c" +[[package]] +name = "url" +version = "2.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0d68c799ae75762b8c3fe375feb6600ef5602c883c5d21eb51c09f22b83c4643" +dependencies = [ + "form_urlencoded", + "idna", + "percent-encoding", + "serde", +] + +[[package]] +name = "urlpattern" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f9bd5ff03aea02fa45b13a7980151fe45009af1980ba69f651ec367121a31609" +dependencies = [ + "derive_more", + "regex", + "serde", + "unic-ucd-ident", + "url", +] + +[[package]] +name = "uuid" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "feb41e78f93363bb2df8b0e86a2ca30eed7806ea16ea0c790d757cf93f79be83" +dependencies = [ + "getrandom 0.2.7", + "serde", +] + +[[package]] +name = "v8" +version = "0.49.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"5a1cbad73336d67babcbe5e3b03c907c8d2ff77fc6f997570af219bbd9fdb6ce" +dependencies = [ + "bitflags", + "fslock", + "lazy_static", + "libc", + "which", +] + [[package]] name = "vec_map" version = "0.8.2" @@ -2205,6 +2782,7 @@ dependencies = [ "async-executor", "bitflags", "bytemuck", + "cfg-if", "console_error_panic_hook", "console_log", "ddsfile", @@ -2310,11 +2888,21 @@ name = "wgpu-types" version = "0.14.0" dependencies = [ "bitflags", - "bitflags_serde_shim", "serde", "serde_json", ] +[[package]] +name = "which" +version = "4.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1c831fbbee9e129a8cf93e7747a82da9d95ba8e16621cae60ec2cdc849bacb7b" +dependencies = [ + "either", + "libc", + "once_cell", +] + [[package]] name = "winapi" version = "0.3.9" diff --git a/Cargo.toml b/Cargo.toml index 7d3044ae3d..0a3262a7e5 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,6 +1,8 @@ [workspace] resolver = "2" members = [ + "cts_runner", + "deno_webgpu", "dummy", "player", "wgpu", @@ -8,14 +10,122 @@ members = [ "wgpu-hal", "wgpu-info", "wgpu-types", - "run-wasm", -] -exclude = [ - "cts_runner", - "deno_webgpu", + "run-wasm" ] +exclude = [] default-members = ["wgpu", "wgpu-hal", "wgpu-info"] +[workspace.package] +edition = "2021" +rust-version = "1.64" +keywords = ["graphics"] +license = "MIT OR Apache-2.0" +homepage = "https://wgpu.rs/" +repository = "https://github.com/gfx-rs/wgpu" +version = "0.14.0" +authors = ["wgpu developers"] + +[workspace.dependencies.wgc] +package = "wgpu-core" +path = "./wgpu-core" + +[workspace.dependencies.wgt] +package = "wgpu-types" +path = "./wgpu-types" + +[workspace.dependencies.hal] +package = "wgpu-hal" +path = "./wgpu-hal" + +[workspace.dependencies.naga] +git = "https://github.com/gfx-rs/naga" +rev = "e7fc8e6" +version = "0.10" + +[workspace.dependencies] +arrayvec = "0.7" +async-executor = "1.0" +bitflags = "1" +bit-vec = "0.6" +bytemuck = "1.4" +cargo-run-wasm = "0.2.0" +cfg_aliases = 
"0.1" +cfg-if = "1" +codespan-reporting = "0.11" +ddsfile = "0.5" +env_logger = "0.9" +futures-intrusive = "0.4" +fxhash = "0.2.1" +glam = "0.21.3" +libloading = "0.7" +log = "0.4" +nanorand = { version = "0.7", default-features = false } +# Opt out of noise's "default-features" to avoid "image" feature as a dependency count optimization. +# This will not be required in the next release since it has been removed from the default feature in https://github.com/Razaekel/noise-rs/commit/1af9e1522236b2c584fb9a02150c9c67a5e6bb04#diff-2e9d962a08321605940b5a657135052fbcef87b5e360662bb527c96d9a615542 +noise = { version = "0.7", default-features = false } +obj = "0.10" +# parking_lot 0.12 switches from `winapi` to `windows`; permit either +parking_lot = ">=0.11,<0.13" +png = "0.17.5" +pollster = "0.2" +profiling = { version = "1", default-features = false } +raw-window-handle = "0.5" +renderdoc-sys = "0.7.1" +ron = "0.8" +serde = "1" +serde_json = "1.0.85" +smallvec = "1" +static_assertions = "1.1.0" +thiserror = "1" +wgpu = { version = "0.14", path = "./wgpu" } +winit = "0.27.1" + +# Metal dependencies +block = "0.1" +foreign-types = "0.3" +mtl = { package = "metal", version = "0.24.0" } +objc = "0.2.5" +core-graphics-types = "0.1" + +# Vulkan dependencies +ash = "0.37" +gpu-alloc = "0.5" +gpu-descriptor = "0.2" +android_system_properties = "0.1.1" + +# DX dependencies +bit-set = "0.5" +native = { package = "d3d12", version = "0.5.0" } +range-alloc = "0.1" +winapi = "0.3" + +# Gles dependencies +egl = { package = "khronos-egl", version = "4.1" } +# glow = { version = "0.11.2", optional = true } +# TODO: New glow release +glow = { git = "https://github.com/grovesNL/glow", rev = "c8a011fcd57a5c68cc917ed394baa484bdefc909" } +glutin = "0.29.1" + +# wasm32 dependencies +console_error_panic_hook = "0.1.7" +console_log = "0.2" +js-sys = "0.3.60" +wasm-bindgen = "0.2.83" +wasm-bindgen-futures = "0.4.33" +web-sys = "0.3.60" + +# deno dependencies +deno_console = 
"0.69.0" +deno_core = "0.151.0" +deno_url = "0.69.0" +deno_web = "0.100.0" +deno_webidl = "0.69.0" +deno_webgpu = { path = "./deno_webgpu" } +tokio = "1.19.0" +termcolor = "1.1.2" +wgpu-core = { path = "./wgpu-core" } +wgpu-types = { path = "./wgpu-types" } + [patch."https://github.com/gfx-rs/naga"] #naga = { path = "../naga" } diff --git a/README.md b/README.md index 7fe11cb7d9..6cc44d0184 100644 --- a/README.md +++ b/README.md @@ -31,15 +31,32 @@ For an overview of all the components in the gfx-rs ecosystem, see [the big pict ### MSRV policy -Minimum Supported Rust Version is **1.62**. +Minimum Supported Rust Version is **1.64**. It is enforced on CI (in "/.github/workflows/ci.yml") with `RUST_VERSION` variable. This version can only be upgraded in breaking releases. +The `wgpu-core`, `wgpu-hal`, and `wgpu-types` crates should never +require an MSRV ahead of Firefox's MSRV for nightly builds, as +determined by the value of `MINIMUM_RUST_VERSION` in +[`python/mozboot/mozboot/util.py`][util]. However, Firefox uses `cargo +vendor` to extract only those crates it actually uses, so the +workspace's other crates can have more recent MSRVs. + +*Note for Rust 1.64*: The workspace itself can even use a newer MSRV +than Firefox, as long as the vendoring step's `Cargo.toml` rewriting +removes any features Firefox's MSRV couldn't handle. For example, +`wgpu` can use manifest key inheritance, added in Rust 1.64, even +before Firefox reaches that MSRV, because `cargo vendor` copies +inherited values directly into the individual crates' `Cargo.toml` +files, producing 1.63-compatible files. + +[util]: https://searchfox.org/mozilla-central/source/python/mozboot/mozboot/util.py + ## Getting Started ### Rust -Rust examples can be found at `wgpu/examples`. You can run the examples with `cargo run --example name`. See the [list of examples](wgpu/examples). For detailed instructions, look at our [Get Started](https://github.com/gfx-rs/wgpu/wiki/Getting-Started) wiki. 
+Rust examples can be found at `wgpu/examples`. You can run the examples with `cargo run --example name`. See the [list of examples](wgpu/examples). For detailed instructions, look at [Running the examples](https://github.com/gfx-rs/wgpu/wiki/Running-the-examples) on the wiki. If you are looking for a wgpu tutorial, look at the following: - https://sotrh.github.io/learn-wgpu/ @@ -93,7 +110,7 @@ Note that the WGSL specification is still under development, so the [draft specification][wgsl spec] does not exactly describe what `wgpu` supports. See [below](#tracking-the-webgpu-and-wgsl-draft-specifications) for details. -To enable SPIR-V shaders, enable the `spirv` feature of wgpu. +To enable SPIR-V shaders, enable the `spirv` feature of wgpu. To enable GLSL shaders, enable the `glsl` feature of wgpu. ### Angle @@ -101,7 +118,7 @@ To enable GLSL shaders, enable the `glsl` feature of wgpu. [Angle](http://angleproject.org) is a translation layer from GLES to other backends, developed by Google. We support running our GLES3 backend over it in order to reach platforms with GLES2 or DX11 support, which aren't accessible otherwise. In order to run with Angle, "angle" feature has to be enabled, and Angle libraries placed in a location visible to the application. -These binaries can be downloaded from [gfbuild-angle](https://github.com/DileSoft/gfbuild-angle) artifacts, [manual compilation](https://github.com/google/angle/blob/main/doc/DevSetup.md) may be required on Macs with Apple silicon. +These binaries can be downloaded from [gfbuild-angle](https://github.com/DileSoft/gfbuild-angle) artifacts, [manual compilation](https://github.com/google/angle/blob/main/doc/DevSetup.md) may be required on Macs with Apple silicon. On Windows, you generally need to copy them into the working directory, in the same directory as the executable, or somewhere in your path. On Linux, you can point to them using `LD_LIBRARY_PATH` environment. 
@@ -204,7 +221,7 @@ Exactly which WGSL features `wgpu` supports depends on how you are using it: for catching up to the WGSL specification, but in general there is no up-to-date summary of the differences between Naga and the WGSL spec. - + - When running in a web browser (by compilation to WebAssembly) without the `"webgl"` feature enabled, `wgpu` relies on the browser's own WebGPU implementation. diff --git a/cts_runner/Cargo.toml b/cts_runner/Cargo.toml index 63df3d1974..03e5ffd9fc 100644 --- a/cts_runner/Cargo.toml +++ b/cts_runner/Cargo.toml @@ -2,20 +2,19 @@ name = "cts_runner" version = "0.1.0" authors = [ - "Luca Casonato ", + "Luca Casonato " ] -edition = "2021" +edition.workspace = true description = "CTS runner for wgpu" -license = "MIT OR Apache-2.0" +license.workspace = true publish = false -resolver = "2" [dependencies] -deno_console = "0.69.0" -deno_core = "0.151.0" -deno_url = "0.69.0" -deno_web = "0.100.0" -deno_webidl = "0.69.0" -deno_webgpu = { path = "../deno_webgpu" } -tokio = { version = "1.19.0", features = ["full"] } -termcolor = "1.1.2" +deno_console.workspace = true +deno_core.workspace = true +deno_url.workspace = true +deno_web.workspace = true +deno_webidl.workspace = true +deno_webgpu.workspace = true +tokio = { workspace = true, features = ["full"] } +termcolor.workspace = true diff --git a/deno_webgpu/Cargo.toml b/deno_webgpu/Cargo.toml index ea7cbf53a7..3091c9c088 100644 --- a/deno_webgpu/Cargo.toml +++ b/deno_webgpu/Cargo.toml @@ -4,15 +4,15 @@ name = "deno_webgpu" version = "0.63.0" authors = ["the Deno authors"] -edition = "2021" +edition.workspace = true license = "MIT" readme = "README.md" -repository = "https://github.com/gfx-rs/wgpu" +repository.workspace = true description = "WebGPU implementation for Deno" [dependencies] -deno_core = "0.151.0" -serde = { version = "1.0", features = ["derive"] } -tokio = { version = "1.19", features = ["full"] } -wgpu-core = { path = "../wgpu-core", features = ["trace", "replay", 
"serde", "strict_asserts", "wgsl"] } -wgpu-types = { path = "../wgpu-types", features = ["trace", "replay", "serde"] } +deno_core.workspace = true +serde = { workspace = true, features = ["derive"] } +tokio = { workspace = true, features = ["full"] } +wgpu-core = { workspace = true, features = ["trace", "replay", "serde", "strict_asserts", "wgsl"] } +wgpu-types = { workspace = true, features = ["trace", "replay", "serde"] } diff --git a/deno_webgpu/src/buffer.rs b/deno_webgpu/src/buffer.rs index 250950f4e1..738760606c 100644 --- a/deno_webgpu/src/buffer.rs +++ b/deno_webgpu/src/buffer.rs @@ -13,6 +13,7 @@ use std::cell::RefCell; use std::convert::TryFrom; use std::rc::Rc; use std::time::Duration; +use wgpu_core::resource::BufferAccessResult; use super::error::DomExceptionOperationError; use super::error::WebGpuResult; @@ -70,7 +71,7 @@ pub async fn op_webgpu_buffer_get_map_async( offset: u64, size: u64, ) -> Result { - let (sender, receiver) = oneshot::channel::>(); + let (sender, receiver) = oneshot::channel::(); let device; { @@ -84,12 +85,7 @@ pub async fn op_webgpu_buffer_get_map_async( device = device_resource.0; let callback = Box::new(move |status| { - sender - .send(match status { - wgpu_core::resource::BufferMapAsyncStatus::Success => Ok(()), - _ => unreachable!(), // TODO - }) - .unwrap(); + sender.send(status).unwrap(); }); // TODO(lucacasonato): error handling diff --git a/deno_webgpu/src/bundle.rs b/deno_webgpu/src/bundle.rs index 65c120fdfc..6c1e1cae43 100644 --- a/deno_webgpu/src/bundle.rs +++ b/deno_webgpu/src/bundle.rs @@ -1,6 +1,6 @@ // Copyright 2018-2022 the Deno authors. All rights reserved. MIT license. 
-use deno_core::error::AnyError; +use deno_core::error::{type_error, AnyError}; use deno_core::op; use deno_core::OpState; use deno_core::Resource; @@ -255,16 +255,14 @@ pub fn op_webgpu_render_bundle_encoder_set_index_buffer( let render_bundle_encoder_resource = state .resource_table .get::(render_bundle_encoder_rid)?; + let size = Some( + std::num::NonZeroU64::new(size).ok_or_else(|| type_error("size must be larger than 0"))?, + ); render_bundle_encoder_resource .0 .borrow_mut() - .set_index_buffer( - buffer_resource.0, - index_format, - offset, - std::num::NonZeroU64::new(size), - ); + .set_index_buffer(buffer_resource.0, index_format, offset, size); Ok(WebGpuResult::empty()) } @@ -284,13 +282,16 @@ pub fn op_webgpu_render_bundle_encoder_set_vertex_buffer( let render_bundle_encoder_resource = state .resource_table .get::(render_bundle_encoder_rid)?; + let size = Some( + std::num::NonZeroU64::new(size).ok_or_else(|| type_error("size must be larger than 0"))?, + ); wgpu_core::command::bundle_ffi::wgpu_render_bundle_set_vertex_buffer( &mut render_bundle_encoder_resource.0.borrow_mut(), slot, buffer_resource.0, offset, - std::num::NonZeroU64::new(size), + size, ); Ok(WebGpuResult::empty()) diff --git a/deno_webgpu/src/lib.rs b/deno_webgpu/src/lib.rs index 6f79aaa613..287e340920 100644 --- a/deno_webgpu/src/lib.rs +++ b/deno_webgpu/src/lib.rs @@ -1,5 +1,7 @@ // Copyright 2018-2022 the Deno authors. All rights reserved. MIT license. 
+#![warn(unsafe_op_in_unsafe_fn)] + use deno_core::error::AnyError; use deno_core::include_js_files; use deno_core::op; diff --git a/dummy/Cargo.toml b/dummy/Cargo.toml index 796bde96f2..c4304724b2 100644 --- a/dummy/Cargo.toml +++ b/dummy/Cargo.toml @@ -1,16 +1,13 @@ [package] name = "dummy" -version = "0.1.0" -authors = [ - "Dzmitry Malyshau ", -] -edition = "2021" -license = "MIT OR Apache-2.0" +version.workspace = true +authors.workspace = true +edition.workspace = true +license.workspace = true publish = false [features] [dependencies.wgc] -path = "../wgpu-core" -package = "wgpu-core" +workspace = true features = ["serial-pass", "trace"] diff --git a/player/Cargo.toml b/player/Cargo.toml index 8cfbbcff83..814ed26ae7 100644 --- a/player/Cargo.toml +++ b/player/Cargo.toml @@ -1,35 +1,33 @@ [package] name = "player" -version = "0.1.0" -authors = [ - "Dzmitry Malyshau ", -] -edition = "2021" +version.workspace = true +authors.workspace = true +edition.workspace = true description = "WebGPU trace player" -homepage = "https://github.com/gfx-rs/wgpu" -repository = "https://github.com/gfx-rs/wgpu" -keywords = ["graphics"] -license = "MIT OR Apache-2.0" +homepage.workspace = true +repository.workspace = true +keywords.workspace = true +license.workspace = true publish = false [features] +angle = ["wgc/angle"] +vulkan-portability = ["wgc/vulkan-portability"] [dependencies] -env_logger = "0.9" -log = "0.4" -raw-window-handle = "0.5" -ron = "0.8" -winit = { version = "0.27", optional = true } +env_logger.workspace = true +log.workspace = true +raw-window-handle.workspace = true +ron.workspace = true +winit = { workspace = true, optional = true } [dependencies.wgt] -path = "../wgpu-types" -package = "wgpu-types" +workspace = true features = ["replay"] [dependencies.wgc] -path = "../wgpu-core" -package = "wgpu-core" +workspace = true features = ["replay", "raw-window-handle", "strict_asserts", "wgsl"] [dev-dependencies] -serde = "1" +serde.workspace = true 
diff --git a/player/src/lib.rs b/player/src/lib.rs index dffb7c069d..0ef6080b77 100644 --- a/player/src/lib.rs +++ b/player/src/lib.rs @@ -6,6 +6,8 @@ * so that we don't accidentally try to use the same ID. !*/ +#![warn(unsafe_op_in_unsafe_fn)] + use wgc::device::trace; use std::{borrow::Cow, fmt::Debug, fs, marker::PhantomData, path::Path}; diff --git a/player/tests/test.rs b/player/tests/test.rs index 0ced0fad8c..06da011a49 100644 --- a/player/tests/test.rs +++ b/player/tests/test.rs @@ -55,10 +55,9 @@ struct Test<'a> { actions: Vec>, } -fn map_callback(status: wgc::resource::BufferMapAsyncStatus) { - match status { - wgc::resource::BufferMapAsyncStatus::Success => (), - _ => panic!("Unable to map"), +fn map_callback(status: Result<(), wgc::resource::BufferAccessError>) { + if let Err(e) = status { + panic!("Buffer map error: {}", e); } } diff --git a/run-wasm/Cargo.toml b/run-wasm/Cargo.toml index 3ac77b26d8..773782457e 100644 --- a/run-wasm/Cargo.toml +++ b/run-wasm/Cargo.toml @@ -1,9 +1,11 @@ [package] name = "run-wasm" -version = "0.1.0" -edition = "2021" +version.workspace = true +authors.workspace = true +edition.workspace = true +license.workspace = true # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -cargo-run-wasm = "0.2.0" +cargo-run-wasm.workspace = true diff --git a/wgpu-core/Cargo.toml b/wgpu-core/Cargo.toml index 8dd0632121..c92865b73d 100644 --- a/wgpu-core/Cargo.toml +++ b/wgpu-core/Cargo.toml @@ -1,13 +1,17 @@ [package] name = "wgpu-core" -version = "0.14.0" -authors = ["wgpu developers"] -edition = "2021" +version.workspace = true +authors.workspace = true +edition.workspace = true description = "WebGPU core logic on wgpu-hal" -homepage = "https://github.com/gfx-rs/wgpu" -repository = "https://github.com/gfx-rs/wgpu" -keywords = ["graphics"] -license = "MIT OR Apache-2.0" +homepage.workspace = true +repository.workspace = true +keywords.workspace = true 
+license.workspace = true + +[package.metadata.docs.rs] +all-features = true +rustdoc-args = ["--cfg", "docsrs"] [lib] @@ -29,55 +33,48 @@ wgsl = ["naga/wgsl-in"] vulkan-portability = ["hal/vulkan"] [dependencies] -arrayvec = "0.7" -bitflags = "1.0" -bit-vec = "0.6" -codespan-reporting = "0.11" -fxhash = "0.2" -log = "0.4" -# parking_lot 0.12 switches from `winapi` to `windows`; permit either -parking_lot = ">=0.11,<0.13" -profiling = { version = "1", default-features = false } -raw-window-handle = { version = "0.5", optional = true } -ron = { version = "0.8", optional = true } -serde = { version = "1.0", features = ["serde_derive"], optional = true } -smallvec = "1" -thiserror = "1" +arrayvec.workspace = true +bitflags.workspace = true +bit-vec.workspace = true +codespan-reporting.workspace = true +fxhash.workspace = true +log.workspace = true +parking_lot.workspace = true +profiling.workspace = true +raw-window-handle = { workspace = true, optional = true } +ron = { workspace = true, optional = true } +serde = { workspace = true, features = ["serde_derive"], optional = true } +smallvec.workspace = true +thiserror.workspace = true [dependencies.naga] -git = "https://github.com/gfx-rs/naga" -rev = "c52d9102" -version = "0.10" +workspace = true features = ["clone", "span", "validate"] [dependencies.wgt] -path = "../wgpu-types" -package = "wgpu-types" -version = "0.14" +workspace = true [dependencies.hal] -path = "../wgpu-hal" -package = "wgpu-hal" -version = "0.14" +workspace = true [target.'cfg(all(target_arch = "wasm32", not(target_os = "emscripten")))'.dependencies] -web-sys = { version = "0.3", features = ["HtmlCanvasElement", "OffscreenCanvas"] } +web-sys = { workspace = true, features = ["HtmlCanvasElement", "OffscreenCanvas"] } [target.'cfg(target_arch = "wasm32")'.dependencies] -hal = { path = "../wgpu-hal", package = "wgpu-hal", version = "0.14", features = ["gles"] } +hal = { workspace = true, features = ["gles"] } [target.'cfg(all(not(target_arch 
= "wasm32"), any(target_os = "ios", target_os = "macos")))'.dependencies] -hal = { path = "../wgpu-hal", package = "wgpu-hal", version = "0.14", features = ["metal"] } +hal = { workspace = true, features = ["metal"] } #Note: could also enable "vulkan" for Vulkan Portability [target.'cfg(all(not(target_arch = "wasm32"), unix, not(target_os = "ios"), not(target_os = "macos")))'.dependencies] -hal = { path = "../wgpu-hal", package = "wgpu-hal", version = "0.14", features = ["vulkan", "gles", "renderdoc"] } +hal = { workspace = true, features = ["vulkan", "gles", "renderdoc"] } [target.'cfg(all(not(target_arch = "wasm32"), windows))'.dependencies] -hal = { path = "../wgpu-hal", package = "wgpu-hal", version = "0.14", features = ["vulkan", "dx12", "dx11", "renderdoc"] } +hal = { workspace = true, features = ["vulkan", "dx12", "dx11", "renderdoc"] } [target.'cfg(target_os = "emscripten")'.dependencies] -hal = { path = "../wgpu-hal", package = "wgpu-hal", version = "0.14", features = ["emscripten"] } +hal = { workspace = true, features = ["emscripten"] } [build-dependencies] -cfg_aliases = "0.1" +cfg_aliases.workspace = true diff --git a/wgpu-core/src/assertions.rs b/wgpu-core/src/assertions.rs index 98e8bd8797..fb9314a3c9 100644 --- a/wgpu-core/src/assertions.rs +++ b/wgpu-core/src/assertions.rs @@ -12,6 +12,7 @@ //! in both debug and release builds. #[cfg(feature = "strict_asserts")] +#[macro_export] macro_rules! strict_assert { ( $( $arg:tt )* ) => { assert!( $( $arg )* ) @@ -19,6 +20,7 @@ macro_rules! strict_assert { } #[cfg(feature = "strict_asserts")] +#[macro_export] macro_rules! strict_assert_eq { ( $( $arg:tt )* ) => { assert_eq!( $( $arg )* ) @@ -26,6 +28,7 @@ macro_rules! strict_assert_eq { } #[cfg(feature = "strict_asserts")] +#[macro_export] macro_rules! strict_assert_ne { ( $( $arg:tt )* ) => { assert_ne!( $( $arg )* ) @@ -35,15 +38,23 @@ macro_rules! strict_assert_ne { #[cfg(not(feature = "strict_asserts"))] #[macro_export] macro_rules! 
strict_assert { - ( $( $arg:tt )* ) => {}; + ( $( $arg:tt )* ) => { + debug_assert!( $( $arg )* ) + }; } #[cfg(not(feature = "strict_asserts"))] +#[macro_export] macro_rules! strict_assert_eq { - ( $( $arg:tt )* ) => {}; + ( $( $arg:tt )* ) => { + debug_assert_eq!( $( $arg )* ) + }; } #[cfg(not(feature = "strict_asserts"))] +#[macro_export] macro_rules! strict_assert_ne { - ( $( $arg:tt )* ) => {}; + ( $( $arg:tt )* ) => { + debug_assert_ne!( $( $arg )* ) + }; } diff --git a/wgpu-core/src/binding_model.rs b/wgpu-core/src/binding_model.rs index 71f95a723d..5feef3bab4 100644 --- a/wgpu-core/src/binding_model.rs +++ b/wgpu-core/src/binding_model.rs @@ -50,6 +50,8 @@ pub enum CreateBindGroupLayoutError { TooManyBindings(BindingTypeMaxCountError), #[error("Binding index {binding} is greater than the maximum index {maximum}")] InvalidBindingIndex { binding: u32, maximum: u32 }, + #[error("Invalid visibility {0:?}")] + InvalidVisibility(wgt::ShaderStages), } //TODO: refactor this to move out `enum BindingError`. @@ -403,7 +405,9 @@ pub struct BindGroupEntry<'a> { #[cfg_attr(feature = "trace", derive(Serialize))] #[cfg_attr(feature = "replay", derive(Deserialize))] pub struct BindGroupDescriptor<'a> { - /// Debug label of the bind group. This will show up in graphics debuggers for easy identification. + /// Debug label of the bind group. + /// + /// This will show up in graphics debuggers for easy identification. pub label: Label<'a>, /// The [`BindGroupLayout`] that corresponds to this bind group. pub layout: BindGroupLayoutId, @@ -416,7 +420,9 @@ pub struct BindGroupDescriptor<'a> { #[cfg_attr(feature = "trace", derive(serde::Serialize))] #[cfg_attr(feature = "replay", derive(serde::Deserialize))] pub struct BindGroupLayoutDescriptor<'a> { - /// Debug label of the bind group layout. This will show up in graphics debuggers for easy identification. + /// Debug label of the bind group layout. + /// + /// This will show up in graphics debuggers for easy identification. 
pub label: Label<'a>, /// Array of entries in this BindGroupLayout pub entries: Cow<'a, [wgt::BindGroupLayoutEntry]>, @@ -537,16 +543,20 @@ pub enum PushConstantUploadError { #[cfg_attr(feature = "trace", derive(Serialize))] #[cfg_attr(feature = "replay", derive(Deserialize))] pub struct PipelineLayoutDescriptor<'a> { - /// Debug label of the pipeine layout. This will show up in graphics debuggers for easy identification. + /// Debug label of the pipeine layout. + /// + /// This will show up in graphics debuggers for easy identification. pub label: Label<'a>, /// Bind groups that this pipeline uses. The first entry will provide all the bindings for /// "set = 0", second entry will provide all the bindings for "set = 1" etc. pub bind_group_layouts: Cow<'a, [BindGroupLayoutId]>, - /// Set of push constant ranges this pipeline uses. Each shader stage that uses push constants - /// must define the range in push constant memory that corresponds to its single `layout(push_constant)` - /// uniform block. + /// Set of push constant ranges this pipeline uses. Each shader stage that + /// uses push constants must define the range in push constant memory that + /// corresponds to its single `layout(push_constant)` uniform block. /// - /// If this array is non-empty, the [`Features::PUSH_CONSTANTS`](wgt::Features::PUSH_CONSTANTS) must be enabled. + /// If this array is non-empty, the + /// [`Features::PUSH_CONSTANTS`](wgt::Features::PUSH_CONSTANTS) feature must + /// be enabled. pub push_constant_ranges: Cow<'a, [wgt::PushConstantRange]>, } diff --git a/wgpu-core/src/command/bind.rs b/wgpu-core/src/command/bind.rs index cc928ef178..fdcb60c52d 100644 --- a/wgpu-core/src/command/bind.rs +++ b/wgpu-core/src/command/bind.rs @@ -309,8 +309,9 @@ struct PushConstantChange { enable: bool, } -/// Break up possibly overlapping push constant ranges into a set of non-overlapping ranges -/// which contain all the stage flags of the original ranges. 
This allows us to zero out (or write any value) +/// Break up possibly overlapping push constant ranges into a set of +/// non-overlapping ranges which contain all the stage flags of the +/// original ranges. This allows us to zero out (or write any value) /// to every possible value. pub fn compute_nonoverlapping_ranges( ranges: &[wgt::PushConstantRange], diff --git a/wgpu-core/src/command/bundle.rs b/wgpu-core/src/command/bundle.rs index c756137f60..37560ec885 100644 --- a/wgpu-core/src/command/bundle.rs +++ b/wgpu-core/src/command/bundle.rs @@ -110,18 +110,28 @@ use hal::CommandEncoder as _; #[cfg_attr(feature = "trace", derive(serde::Serialize))] #[cfg_attr(feature = "replay", derive(serde::Deserialize))] pub struct RenderBundleEncoderDescriptor<'a> { - /// Debug label of the render bundle encoder. This will show up in graphics debuggers for easy identification. + /// Debug label of the render bundle encoder. + /// + /// This will show up in graphics debuggers for easy identification. pub label: Label<'a>, - /// The formats of the color attachments that this render bundle is capable to rendering to. This - /// must match the formats of the color attachments in the renderpass this render bundle is executed in. + /// The formats of the color attachments that this render bundle is capable + /// to rendering to. + /// + /// This must match the formats of the color attachments in the + /// renderpass this render bundle is executed in. pub color_formats: Cow<'a, [Option]>, - /// Information about the depth attachment that this render bundle is capable to rendering to. The format - /// must match the format of the depth attachments in the renderpass this render bundle is executed in. + /// Information about the depth attachment that this render bundle is + /// capable to rendering to. + /// + /// The format must match the format of the depth attachments in the + /// renderpass this render bundle is executed in. 
pub depth_stencil: Option, - /// Sample count this render bundle is capable of rendering to. This must match the pipelines and - /// the renderpasses it is used in. + /// Sample count this render bundle is capable of rendering to. + /// + /// This must match the pipelines and the renderpasses it is used in. pub sample_count: u32, - /// If this render bundle will rendering to multiple array layers in the attachments at the same time. + /// If this render bundle will rendering to multiple array layers in the + /// attachments at the same time. pub multiview: Option, } @@ -753,7 +763,7 @@ impl RenderBundle { let mut offsets = self.base.dynamic_offsets.as_slice(); let mut pipeline_layout_id = None::>; if let Some(ref label) = self.base.label { - raw.begin_debug_marker(label); + unsafe { raw.begin_debug_marker(label) }; } for command in self.base.commands.iter() { @@ -764,17 +774,19 @@ impl RenderBundle { bind_group_id, } => { let bind_group = bind_group_guard.get(bind_group_id).unwrap(); - raw.set_bind_group( - &pipeline_layout_guard[pipeline_layout_id.unwrap()].raw, - index as u32, - &bind_group.raw, - &offsets[..num_dynamic_offsets as usize], - ); + unsafe { + raw.set_bind_group( + &pipeline_layout_guard[pipeline_layout_id.unwrap()].raw, + index as u32, + &bind_group.raw, + &offsets[..num_dynamic_offsets as usize], + ) + }; offsets = &offsets[num_dynamic_offsets as usize..]; } RenderCommand::SetPipeline(pipeline_id) => { let pipeline = pipeline_guard.get(pipeline_id).unwrap(); - raw.set_render_pipeline(&pipeline.raw); + unsafe { raw.set_render_pipeline(&pipeline.raw) }; pipeline_layout_id = Some(pipeline.layout_id.value); } @@ -795,7 +807,7 @@ impl RenderBundle { offset, size, }; - raw.set_index_buffer(bb, index_format); + unsafe { raw.set_index_buffer(bb, index_format) }; } RenderCommand::SetVertexBuffer { slot, @@ -814,7 +826,7 @@ impl RenderBundle { offset, size, }; - raw.set_vertex_buffer(slot, bb); + unsafe { raw.set_vertex_buffer(slot, bb) }; } 
RenderCommand::SetPushConstant { stages, @@ -831,18 +843,22 @@ impl RenderBundle { let data_slice = &self.base.push_constant_data [(values_offset as usize)..values_end_offset]; - raw.set_push_constants(&pipeline_layout.raw, stages, offset, data_slice) + unsafe { + raw.set_push_constants(&pipeline_layout.raw, stages, offset, data_slice) + } } else { super::push_constant_clear( offset, size_bytes, |clear_offset, clear_data| { - raw.set_push_constants( - &pipeline_layout.raw, - stages, - clear_offset, - clear_data, - ); + unsafe { + raw.set_push_constants( + &pipeline_layout.raw, + stages, + clear_offset, + clear_data, + ) + }; }, ); } @@ -853,7 +869,7 @@ impl RenderBundle { first_vertex, first_instance, } => { - raw.draw(first_vertex, vertex_count, first_instance, instance_count); + unsafe { raw.draw(first_vertex, vertex_count, first_instance, instance_count) }; } RenderCommand::DrawIndexed { index_count, @@ -862,13 +878,15 @@ impl RenderBundle { base_vertex, first_instance, } => { - raw.draw_indexed( - first_index, - index_count, - base_vertex, - first_instance, - instance_count, - ); + unsafe { + raw.draw_indexed( + first_index, + index_count, + base_vertex, + first_instance, + instance_count, + ) + }; } RenderCommand::MultiDrawIndirect { buffer_id, @@ -882,7 +900,7 @@ impl RenderBundle { .raw .as_ref() .ok_or(ExecutionError::DestroyedBuffer(buffer_id))?; - raw.draw_indirect(buffer, offset, 1); + unsafe { raw.draw_indirect(buffer, offset, 1) }; } RenderCommand::MultiDrawIndirect { buffer_id, @@ -896,7 +914,7 @@ impl RenderBundle { .raw .as_ref() .ok_or(ExecutionError::DestroyedBuffer(buffer_id))?; - raw.draw_indexed_indirect(buffer, offset, 1); + unsafe { raw.draw_indexed_indirect(buffer, offset, 1) }; } RenderCommand::MultiDrawIndirect { .. } | RenderCommand::MultiDrawIndirectCount { .. 
} => { @@ -921,7 +939,7 @@ impl RenderBundle { } if let Some(_) = self.base.label { - raw.end_debug_marker(); + unsafe { raw.end_debug_marker() }; } Ok(()) @@ -1429,13 +1447,15 @@ pub mod bundle_ffi { offsets: *const DynamicOffset, offset_length: usize, ) { - let redundant = bundle.current_bind_groups.set_and_check_redundant( - bind_group_id, - index, - &mut bundle.base.dynamic_offsets, - offsets, - offset_length, - ); + let redundant = unsafe { + bundle.current_bind_groups.set_and_check_redundant( + bind_group_id, + index, + &mut bundle.base.dynamic_offsets, + offsets, + offset_length, + ) + }; if redundant { return; @@ -1512,7 +1532,7 @@ pub mod bundle_ffi { 0, "Push constant size must be aligned to 4 bytes." ); - let data_slice = slice::from_raw_parts(data, size_bytes as usize); + let data_slice = unsafe { slice::from_raw_parts(data, size_bytes as usize) }; let value_offset = pass.base.push_constant_data.len().try_into().expect( "Ran out of push constant space. Don't set 4gb of push constants per RenderBundle.", ); diff --git a/wgpu-core/src/command/clear.rs b/wgpu-core/src/command/clear.rs index 3269de0312..aa9d60638e 100644 --- a/wgpu-core/src/command/clear.rs +++ b/wgpu-core/src/command/clear.rs @@ -266,12 +266,19 @@ pub(crate) fn clear_texture( layers: range.layer_range.clone(), }; - // If we're in a texture-init usecase, we know that the texture is already tracked since whatever caused the init requirement, - // will have caused the usage tracker to be aware of the texture. Meaning, that it is safe to call call change_replace_tracked if the life_guard is already gone - // (i.e. the user no longer holds on to this texture). - // On the other hand, when coming via command_encoder_clear_texture, the life_guard is still there since in order to call it a texture object is needed. 
+ // If we're in a texture-init usecase, we know that the texture is already + // tracked since whatever caused the init requirement, will have caused the + // usage tracker to be aware of the texture. Meaning, that it is safe to + // call call change_replace_tracked if the life_guard is already gone (i.e. + // the user no longer holds on to this texture). // - // We could in theory distinguish these two scenarios in the internal clear_texture api in order to remove this check and call the cheaper change_replace_tracked whenever possible. + // On the other hand, when coming via command_encoder_clear_texture, the + // life_guard is still there since in order to call it a texture object is + // needed. + // + // We could in theory distinguish these two scenarios in the internal + // clear_texture api in order to remove this check and call the cheaper + // change_replace_tracked whenever possible. let dst_barrier = texture_tracker .set_single(dst_texture, dst_texture_id.0, selector, clear_usage) .unwrap() @@ -331,8 +338,13 @@ fn clear_texture_via_buffer_copies( // round down to a multiple of rows needed by the texture format let max_rows_per_copy = max_rows_per_copy / format_desc.block_dimensions.1 as u32 * format_desc.block_dimensions.1 as u32; - assert!(max_rows_per_copy > 0, "Zero buffer size is too small to fill a single row of a texture with format {:?} and desc {:?}", - texture_desc.format, texture_desc.size); + assert!( + max_rows_per_copy > 0, + "Zero buffer size is too small to fill a single row \ + of a texture with format {:?} and desc {:?}", + texture_desc.format, + texture_desc.size + ); let z_range = 0..(if texture_desc.dimension == wgt::TextureDimension::D3 { mip_size.depth_or_array_layers @@ -343,7 +355,8 @@ fn clear_texture_via_buffer_copies( for array_layer in range.layer_range.clone() { // TODO: Only doing one layer at a time for volume textures right now. for z in z_range.clone() { - // May need multiple copies for each subresource! 
However, we assume that we never need to split a row. + // May need multiple copies for each subresource! However, we + // assume that we never need to split a row. let mut num_rows_left = mip_size.height; while num_rows_left > 0 { let num_rows = num_rows_left.min(max_rows_per_copy); @@ -399,7 +412,8 @@ fn clear_texture_via_render_passes( for mip_level in range.mip_range { let extent = extent_base.mip_level_size(mip_level, is_3d_texture); let layer_or_depth_range = if dst_texture.desc.dimension == wgt::TextureDimension::D3 { - // TODO: We assume that we're allowed to do clear operations on volume texture slices, this is not properly specified. + // TODO: We assume that we're allowed to do clear operations on + // volume texture slices, this is not properly specified. 0..extent.depth_or_array_layers } else { range.layer_range.clone() diff --git a/wgpu-core/src/command/compute.rs b/wgpu-core/src/command/compute.rs index cbbb2e054e..c228519595 100644 --- a/wgpu-core/src/command/compute.rs +++ b/wgpu-core/src/command/compute.rs @@ -268,7 +268,8 @@ impl State { Ok(()) } - // `extra_buffer` is there to represent the indirect buffer that is also part of the usage scope. + // `extra_buffer` is there to represent the indirect buffer that is also + // part of the usage scope. fn flush_states( &mut self, raw_encoder: &mut A::CommandEncoder, @@ -391,7 +392,8 @@ impl Global { raw.begin_compute_pass(&hal_desc); } - // Immediate texture inits required because of prior discards. Need to be inserted before texture reads. + // Immediate texture inits required because of prior discards. Need to + // be inserted before texture reads. let mut pending_discard_init_fixups = SurfacesInDiscardState::new(); for command in base.commands { @@ -763,8 +765,11 @@ impl Global { } cmd_buf.status = CommandEncoderStatus::Recording; - // There can be entries left in pending_discard_init_fixups if a bind group was set, but not used (i.e. 
no Dispatch occurred) - // However, we already altered the discard/init_action state on this cmd_buf, so we need to apply the promised changes. + // There can be entries left in pending_discard_init_fixups if a bind + // group was set, but not used (i.e. no Dispatch occurred) + // + // However, we already altered the discard/init_action state on this + // cmd_buf, so we need to apply the promised changes. fixup_discarded_surfaces( pending_discard_init_fixups.into_iter(), raw, @@ -795,13 +800,15 @@ pub mod compute_ffi { offsets: *const DynamicOffset, offset_length: usize, ) { - let redundant = pass.current_bind_groups.set_and_check_redundant( - bind_group_id, - index, - &mut pass.base.dynamic_offsets, - offsets, - offset_length, - ); + let redundant = unsafe { + pass.current_bind_groups.set_and_check_redundant( + bind_group_id, + index, + &mut pass.base.dynamic_offsets, + offsets, + offset_length, + ) + }; if redundant { return; @@ -849,7 +856,7 @@ pub mod compute_ffi { 0, "Push constant size must be aligned to 4 bytes." ); - let data_slice = slice::from_raw_parts(data, size_bytes as usize); + let data_slice = unsafe { slice::from_raw_parts(data, size_bytes as usize) }; let value_offset = pass.base.push_constant_data.len().try_into().expect( "Ran out of push constant space. 
Don't set 4gb of push constants per ComputePass.", ); @@ -900,7 +907,7 @@ pub mod compute_ffi { label: RawString, color: u32, ) { - let bytes = ffi::CStr::from_ptr(label).to_bytes(); + let bytes = unsafe { ffi::CStr::from_ptr(label) }.to_bytes(); pass.base.string_data.extend_from_slice(bytes); pass.base.commands.push(ComputeCommand::PushDebugGroup { @@ -924,7 +931,7 @@ pub mod compute_ffi { label: RawString, color: u32, ) { - let bytes = ffi::CStr::from_ptr(label).to_bytes(); + let bytes = unsafe { ffi::CStr::from_ptr(label) }.to_bytes(); pass.base.string_data.extend_from_slice(bytes); pass.base.commands.push(ComputeCommand::InsertDebugMarker { diff --git a/wgpu-core/src/command/memory_init.rs b/wgpu-core/src/command/memory_init.rs index d974575f91..52735fec51 100644 --- a/wgpu-core/src/command/memory_init.rs +++ b/wgpu-core/src/command/memory_init.rs @@ -27,10 +27,12 @@ pub(crate) type SurfacesInDiscardState = Vec; #[derive(Default)] pub(crate) struct CommandBufferTextureMemoryActions { - // init actions describe the tracker actions that we need to be executed before the command buffer is executed + /// The tracker actions that we need to be executed before the command + /// buffer is executed. init_actions: Vec, - // discards describe all the discards that haven't been followed by init again within the command buffer - // i.e. everything in this list resets the texture init state *after* the command buffer execution + /// All the discards that haven't been followed by init again within the + /// command buffer i.e. 
everything in this list resets the texture init + /// state *after* the command buffer execution discards: Vec, } @@ -54,19 +56,22 @@ impl CommandBufferTextureMemoryActions { ) -> SurfacesInDiscardState { let mut immediately_necessary_clears = SurfacesInDiscardState::new(); - // Note that within a command buffer we may stack arbitrary memory init actions on the same texture - // Since we react to them in sequence, they are going to be dropped again at queue submit + // Note that within a command buffer we may stack arbitrary memory init + // actions on the same texture Since we react to them in sequence, they + // are going to be dropped again at queue submit // - // We don't need to add MemoryInitKind::NeedsInitializedMemory to init_actions if a surface is part of the discard list. - // But that would mean splitting up the action which is more than we'd win here. + // We don't need to add MemoryInitKind::NeedsInitializedMemory to + // init_actions if a surface is part of the discard list. But that would + // mean splitting up the action which is more than we'd win here. self.init_actions .extend(match texture_guard.get(action.id) { Ok(texture) => texture.initialization_status.check_action(action), Err(_) => return immediately_necessary_clears, // texture no longer exists }); - // We expect very few discarded surfaces at any point in time which is why a simple linear search is likely best. - // (i.e. most of the time self.discards is empty!) + // We expect very few discarded surfaces at any point in time which is + // why a simple linear search is likely best. (i.e. most of the time + // self.discards is empty!) 
let init_actions = &mut self.init_actions; self.discards.retain(|discarded_surface| { if discarded_surface.texture == action.id @@ -79,7 +84,9 @@ impl CommandBufferTextureMemoryActions { if let MemoryInitKind::NeedsInitializedMemory = action.kind { immediately_necessary_clears.push(discarded_surface.clone()); - // Mark surface as implicitly initialized (this is relevant because it might have been uninitialized prior to discarding + // Mark surface as implicitly initialized (this is relevant + // because it might have been uninitialized prior to + // discarding init_actions.push(TextureInitTrackerAction { id: discarded_surface.texture, range: TextureInitRange { @@ -99,7 +106,8 @@ impl CommandBufferTextureMemoryActions { immediately_necessary_clears } - // Shortcut for register_init_action when it is known that the action is an implicit init, not requiring any immediate resource init. + // Shortcut for register_init_action when it is known that the action is an + // implicit init, not requiring any immediate resource init. pub(crate) fn register_implicit_init( &mut self, id: id::Valid, @@ -118,7 +126,9 @@ impl CommandBufferTextureMemoryActions { } } -// Utility function that takes discarded surfaces from (several calls to) register_init_action and initializes them on the spot. +// Utility function that takes discarded surfaces from (several calls to) +// register_init_action and initializes them on the spot. +// // Takes care of barriers as well! 
pub(crate) fn fixup_discarded_surfaces< A: HalApi, @@ -148,14 +158,16 @@ pub(crate) fn fixup_discarded_surfaces< } impl BakedCommands { - // inserts all buffer initializations that are going to be needed for executing the commands and updates resource init states accordingly + // inserts all buffer initializations that are going to be needed for + // executing the commands and updates resource init states accordingly pub(crate) fn initialize_buffer_memory( &mut self, device_tracker: &mut Tracker, buffer_guard: &mut Storage, id::BufferId>, ) -> Result<(), DestroyedBufferError> { // Gather init ranges for each buffer so we can collapse them. - // It is not possible to do this at an earlier point since previously executed command buffer change the resource init state. + // It is not possible to do this at an earlier point since previously + // executed command buffer change the resource init state. let mut uninitialized_ranges_per_buffer = FastHashMap::default(); for buffer_use in self.buffer_memory_init_actions.drain(..) { let buffer = buffer_guard @@ -194,15 +206,19 @@ impl BakedCommands { // Collapse touching ranges. ranges.sort_by_key(|r| r.start); for i in (1..ranges.len()).rev() { - assert!(ranges[i - 1].end <= ranges[i].start); // The memory init tracker made sure of this! + // The memory init tracker made sure of this! + assert!(ranges[i - 1].end <= ranges[i].start); if ranges[i].start == ranges[i - 1].end { ranges[i - 1].end = ranges[i].end; ranges.swap_remove(i); // Ordering not important at this point } } - // Don't do use_replace since the buffer may already no longer have a ref_count. - // However, we *know* that it is currently in use, so the tracker must already know about it. + // Don't do use_replace since the buffer may already no longer have + // a ref_count. + // + // However, we *know* that it is currently in use, so the tracker + // must already know about it. 
let transition = device_tracker .buffers .set_single(buffer_guard, buffer_id, hal::BufferUses::COPY_DST) @@ -223,8 +239,20 @@ impl BakedCommands { } for range in ranges.iter() { - assert!(range.start % wgt::COPY_BUFFER_ALIGNMENT == 0, "Buffer {:?} has an uninitialized range with a start not aligned to 4 (start was {})", raw_buf, range.start); - assert!(range.end % wgt::COPY_BUFFER_ALIGNMENT == 0, "Buffer {:?} has an uninitialized range with an end not aligned to 4 (end was {})", raw_buf, range.end); + assert!( + range.start % wgt::COPY_BUFFER_ALIGNMENT == 0, + "Buffer {:?} has an uninitialized range with a start \ + not aligned to 4 (start was {})", + raw_buf, + range.start + ); + assert!( + range.end % wgt::COPY_BUFFER_ALIGNMENT == 0, + "Buffer {:?} has an uninitialized range with an end \ + not aligned to 4 (end was {})", + raw_buf, + range.end + ); unsafe { self.encoder.clear_buffer(raw_buf, range.clone()); @@ -234,8 +262,10 @@ impl BakedCommands { Ok(()) } - // inserts all texture initializations that are going to be needed for executing the commands and updates resource init states accordingly - // any textures that are left discarded by this command buffer will be marked as uninitialized + // inserts all texture initializations that are going to be needed for + // executing the commands and updates resource init states accordingly any + // textures that are left discarded by this command buffer will be marked as + // uninitialized pub(crate) fn initialize_texture_memory( &mut self, device_tracker: &mut Tracker, @@ -290,7 +320,9 @@ impl BakedCommands { } } - // Now that all buffers/textures have the proper init state for before cmdbuf start, we discard init states for textures it left discarded after its execution. + // Now that all buffers/textures have the proper init state for before + // cmdbuf start, we discard init states for textures it left discarded + // after its execution. 
for surface_discard in self.texture_memory_actions.discards.iter() { let texture = texture_guard .get_mut(surface_discard.texture) diff --git a/wgpu-core/src/command/mod.rs b/wgpu-core/src/command/mod.rs index 257bb2edaf..f6dc086350 100644 --- a/wgpu-core/src/command/mod.rs +++ b/wgpu-core/src/command/mod.rs @@ -511,7 +511,8 @@ impl BindGroupStateChange { ) -> bool { // For now never deduplicate bind groups with dynamic offsets. if offset_length == 0 { - // If this get returns None, that means we're well over the limit, so let the call through to get a proper error + // If this get returns None, that means we're well over the limit, + // so let the call through to get a proper error if let Some(current_bind_group) = self.last_states.get_mut(index as usize) { // Bail out if we're binding the same bind group. if current_bind_group.set_and_check_redundant(bind_group_id) { @@ -525,7 +526,8 @@ impl BindGroupStateChange { if let Some(current_bind_group) = self.last_states.get_mut(index as usize) { current_bind_group.reset(); } - dynamic_offsets.extend_from_slice(slice::from_raw_parts(offsets, offset_length)); + dynamic_offsets + .extend_from_slice(unsafe { slice::from_raw_parts(offsets, offset_length) }); } false } diff --git a/wgpu-core/src/command/query.rs b/wgpu-core/src/command/query.rs index 0d6fc8a558..d4176195a8 100644 --- a/wgpu-core/src/command/query.rs +++ b/wgpu-core/src/command/query.rs @@ -180,7 +180,8 @@ impl QuerySet { query_index: u32, reset_state: Option<&mut QueryResetMap>, ) -> Result<&A::QuerySet, QueryUseError> { - // We need to defer our resets because we are in a renderpass, add the usage to the reset map. + // We need to defer our resets because we are in a renderpass, + // add the usage to the reset map. 
if let Some(reset) = reset_state { let used = reset.use_query_set(query_set_id, self, query_index); if used { diff --git a/wgpu-core/src/command/render.rs b/wgpu-core/src/command/render.rs index 4e221b3c95..09af0bbe6a 100644 --- a/wgpu-core/src/command/render.rs +++ b/wgpu-core/src/command/render.rs @@ -63,7 +63,9 @@ pub enum LoadOp { #[cfg_attr(any(feature = "serial-pass", feature = "replay"), derive(Deserialize))] #[cfg_attr(feature = "serde", serde(rename_all = "kebab-case"))] pub enum StoreOp { - /// Discards the content of the render target. If you don't care about the contents of the target, this can be faster. + /// Discards the content of the render target. + /// + /// If you don't care about the contents of the target, this can be faster. Discard = 0, /// Store the result of the renderpass. Store = 1, @@ -75,15 +77,20 @@ pub enum StoreOp { #[cfg_attr(any(feature = "serial-pass", feature = "trace"), derive(Serialize))] #[cfg_attr(any(feature = "serial-pass", feature = "replay"), derive(Deserialize))] pub struct PassChannel { - /// Operation to perform to the output attachment at the start of a renderpass. This must be clear if it - /// is the first renderpass rendering to a swap chain image. + /// Operation to perform to the output attachment at the start of a + /// renderpass. + /// + /// This must be clear if it is the first renderpass rendering to a swap + /// chain image. pub load_op: LoadOp, /// Operation to perform to the output attachment at the end of a renderpass. pub store_op: StoreOp, - /// If load_op is [`LoadOp::Clear`], the attachment will be cleared to this color. + /// If load_op is [`LoadOp::Clear`], the attachment will be cleared to this + /// color. pub clear_value: V, - /// If true, the relevant channel is not changed by a renderpass, and the corresponding attachment - /// can be used inside the pass by other read-only usages. 
+ /// If true, the relevant channel is not changed by a renderpass, and the + /// corresponding attachment can be used inside the pass by other read-only + /// usages. pub read_only: bool, } @@ -236,16 +243,17 @@ impl RenderPass { impl fmt::Debug for RenderPass { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!( - f, - "RenderPass {{ encoder_id: {:?}, color_targets: {:?}, depth_stencil_target: {:?}, data: {:?} commands, {:?} dynamic offsets, and {:?} push constant u32s }}", - self.parent_id, - self.color_targets, - self.depth_stencil_target, - self.base.commands.len(), - self.base.dynamic_offsets.len(), - self.base.push_constant_data.len(), - ) + f.debug_struct("RenderPass") + .field("encoder_id", &self.parent_id) + .field("color_targets", &self.color_targets) + .field("depth_stencil_target", &self.depth_stencil_target) + .field("command count", &self.base.commands.len()) + .field("dynamic offset count", &self.base.dynamic_offsets.len()) + .field( + "push constant u32 count", + &self.base.push_constant_data.len(), + ) + .finish() } } @@ -595,7 +603,8 @@ type AttachmentDataVec = ArrayVec; struct RenderPassInfo<'a, A: HalApi> { context: RenderPassContext, usage_scope: UsageScope, - render_attachments: AttachmentDataVec>, // All render attachments, including depth/stencil + /// All render attachments, including depth/stencil + render_attachments: AttachmentDataVec>, is_depth_read_only: bool, is_stencil_read_only: bool, extent: wgt::Extent3d, @@ -633,8 +642,9 @@ impl<'a, A: HalApi> RenderPassInfo<'a, A> { ); } if channel.store_op == StoreOp::Discard { - // the discard happens at the *end* of a pass - // but recording the discard right away be alright since the texture can't be used during the pass anyways + // the discard happens at the *end* of a pass, but recording the + // discard right away be alright since the texture can't be used + // during the pass anyways texture_memory_actions.discard(TextureSurfaceDiscard { texture: 
view.parent_id.value.0, mip_level: view.selector.mips.start, @@ -722,9 +732,6 @@ impl<'a, A: HalApi> RenderPassInfo<'a, A> { expected: sample_count, }); } - if sample_count != 1 && sample_count != 4 { - return Err(RenderPassErrorInner::InvalidSampleCount(sample_count)); - } attachment_type_name = type_name; Ok(()) }; @@ -769,15 +776,27 @@ impl<'a, A: HalApi> RenderPassInfo<'a, A> { &mut pending_discard_init_fixups, ); } else { - // This is the only place (anywhere in wgpu) where Stencil & Depth init state can diverge. - // To safe us the overhead of tracking init state of texture aspects everywhere, - // we're going to cheat a little bit in order to keep the init state of both Stencil and Depth aspects in sync. - // The expectation is that we hit this path extremely rarely! - + // This is the only place (anywhere in wgpu) where Stencil & + // Depth init state can diverge. + // + // To safe us the overhead of tracking init state of texture + // aspects everywhere, we're going to cheat a little bit in + // order to keep the init state of both Stencil and Depth + // aspects in sync. The expectation is that we hit this path + // extremely rarely! + // // Diverging LoadOp, i.e. Load + Clear: - // Record MemoryInitKind::NeedsInitializedMemory for the entire surface, a bit wasteful on unit but no negative effect! - // Rationale: If the loaded channel is uninitialized it needs clearing, the cleared channel doesn't care. (If everything is already initialized nothing special happens) - // (possible minor optimization: Clear caused by NeedsInitializedMemory should know that it doesn't need to clear the aspect that was set to C) + // + // Record MemoryInitKind::NeedsInitializedMemory for the entire + // surface, a bit wasteful on unit but no negative effect! + // + // Rationale: If the loaded channel is uninitialized it needs + // clearing, the cleared channel doesn't care. 
(If everything is + // already initialized nothing special happens) + // + // (possible minor optimization: Clear caused by + // NeedsInitializedMemory should know that it doesn't need to + // clear the aspect that was set to C) let need_init_beforehand = at.depth.load_op == LoadOp::Load || at.stencil.load_op == LoadOp::Load; if need_init_beforehand { @@ -794,8 +813,12 @@ impl<'a, A: HalApi> RenderPassInfo<'a, A> { } // Diverging Store, i.e. Discard + Store: - // Immediately zero out channel that is set to discard after we're done with the render pass. - // This allows us to set the entire surface to MemoryInitKind::ImplicitlyInitialized (if it isn't already set to NeedsInitializedMemory). + // + // Immediately zero out channel that is set to discard after + // we're done with the render pass. This allows us to set the + // entire surface to MemoryInitKind::ImplicitlyInitialized (if + // it isn't already set to NeedsInitializedMemory). + // // (possible optimization: Delay and potentially drop this zeroing) if at.depth.store_op != at.stencil.store_op { if !need_init_beforehand { @@ -1025,10 +1048,15 @@ impl<'a, A: HalApi> RenderPassInfo<'a, A> { }; } - // If either only stencil or depth was discarded, we put in a special clear pass to keep the init status of the aspects in sync. - // We do this so we don't need to track init state for depth/stencil aspects individually. - // Note that we don't go the usual route of "brute force" initializing the texture when need arises here, - // since this path is actually something a user may genuinely want (where as the other cases are more seen along the lines as gracefully handling a user error). + // If either only stencil or depth was discarded, we put in a special + // clear pass to keep the init status of the aspects in sync. We do this + // so we don't need to track init state for depth/stencil aspects + // individually. 
+ // + // Note that we don't go the usual route of "brute force" initializing + // the texture when need arises here, since this path is actually + // something a user may genuinely want (where as the other cases are + // more seen along the lines as gracefully handling a user error). if let Some((aspect, view)) = self.divergent_discarded_depth_stencil_aspect { let (depth_ops, stencil_ops) = if aspect == wgt::TextureAspect::DepthOnly { ( @@ -1630,7 +1658,8 @@ impl Global { }; state.is_ready(indexed).map_pass_err(scope)?; - //TODO: validate that base_vertex + max_index() is within the provided range + //TODO: validate that base_vertex + max_index() is + // within the provided range let last_index = first_index + index_count; let index_limit = state.index.limit; if last_index > index_limit { @@ -2084,13 +2113,15 @@ pub mod render_ffi { offsets: *const DynamicOffset, offset_length: usize, ) { - let redundant = pass.current_bind_groups.set_and_check_redundant( - bind_group_id, - index, - &mut pass.base.dynamic_offsets, - offsets, - offset_length, - ); + let redundant = unsafe { + pass.current_bind_groups.set_and_check_redundant( + bind_group_id, + index, + &mut pass.base.dynamic_offsets, + offsets, + offset_length, + ) + }; if redundant { return; @@ -2210,7 +2241,7 @@ pub mod render_ffi { 0, "Push constant size must be aligned to 4 bytes." ); - let data_slice = slice::from_raw_parts(data, size_bytes as usize); + let data_slice = unsafe { slice::from_raw_parts(data, size_bytes as usize) }; let value_offset = pass.base.push_constant_data.len().try_into().expect( "Ran out of push constant space. 
Don't set 4gb of push constants per RenderPass.", ); @@ -2373,7 +2404,7 @@ pub mod render_ffi { label: RawString, color: u32, ) { - let bytes = ffi::CStr::from_ptr(label).to_bytes(); + let bytes = unsafe { ffi::CStr::from_ptr(label) }.to_bytes(); pass.base.string_data.extend_from_slice(bytes); pass.base.commands.push(RenderCommand::PushDebugGroup { @@ -2397,7 +2428,7 @@ pub mod render_ffi { label: RawString, color: u32, ) { - let bytes = ffi::CStr::from_ptr(label).to_bytes(); + let bytes = unsafe { ffi::CStr::from_ptr(label) }.to_bytes(); pass.base.string_data.extend_from_slice(bytes); pass.base.commands.push(RenderCommand::InsertDebugMarker { @@ -2449,7 +2480,9 @@ pub mod render_ffi { render_bundle_ids: *const id::RenderBundleId, render_bundle_ids_length: usize, ) { - for &bundle_id in slice::from_raw_parts(render_bundle_ids, render_bundle_ids_length) { + for &bundle_id in + unsafe { slice::from_raw_parts(render_bundle_ids, render_bundle_ids_length) } + { pass.base .commands .push(RenderCommand::ExecuteBundle(bundle_id)); diff --git a/wgpu-core/src/command/transfer.rs b/wgpu-core/src/command/transfer.rs index 04294e00d6..063e33a0a0 100644 --- a/wgpu-core/src/command/transfer.rs +++ b/wgpu-core/src/command/transfer.rs @@ -197,8 +197,15 @@ pub(crate) fn extract_texture_selector( Ok((selector, base, format)) } -/// Function copied with some modifications from webgpu standard -/// If successful, returns (number of buffer bytes required for this copy, number of bytes between array layers). +/// WebGPU's [validating linear texture data][vltd] algorithm. +/// +/// Copied with some modifications from WebGPU standard. +/// +/// If successful, returns a pair `(bytes, stride)`, where: +/// - `bytes` is the number of buffer bytes required for this copy, and +/// - `stride` number of bytes between array layers. 
+/// +/// [vltd]: https://gpuweb.github.io/gpuweb/#abstract-opdef-validating-linear-texture-data pub(crate) fn validate_linear_texture_data( layout: &wgt::ImageDataLayout, format: wgt::TextureFormat, @@ -208,7 +215,10 @@ pub(crate) fn validate_linear_texture_data( copy_size: &Extent3d, need_copy_aligned_rows: bool, ) -> Result<(BufferAddress, BufferAddress), TransferError> { - // Convert all inputs to BufferAddress (u64) to prevent overflow issues + // Convert all inputs to BufferAddress (u64) to avoid some of the overflow issues + // Note: u64 is not always enough to prevent overflow, especially when multiplying + // something with a potentially large depth value, so it is preferable to validate + // the copy size before calling this function (for example via `validate_texture_copy_range`). let copy_width = copy_size.width as BufferAddress; let copy_height = copy_size.height as BufferAddress; let copy_depth = copy_size.depth_or_array_layers as BufferAddress; @@ -288,8 +298,13 @@ pub(crate) fn validate_linear_texture_data( Ok((required_bytes_in_copy, bytes_per_image)) } -/// Function copied with minor modifications from webgpu standard +/// WebGPU's [validating texture copy range][vtcr] algorithm. +/// +/// Copied with minor modifications from WebGPU standard. +/// /// Returns the HAL copy extent and the layer count. +/// +/// [vtcr]: https://gpuweb.github.io/gpuweb/#valid-texture-copy-range pub(crate) fn validate_texture_copy_range( texture_copy_view: &ImageCopyTexture, desc: &wgt::TextureDescriptor<()>, @@ -441,7 +456,10 @@ fn handle_texture_init( } } -// Ensures the source texture of a transfer is in the right initialization state and records the state for after the transfer operation. +/// Prepare a transfer's source texture. +/// +/// Ensure the source texture of a transfer is in the right initialization +/// state, and record the state for after the transfer operation.
fn handle_src_texture_init( cmd_buf: &mut CommandBuffer, device: &Device, @@ -464,7 +482,10 @@ fn handle_src_texture_init( Ok(()) } -// Ensures the destination texture of a transfer is in the right initialization state and records the state for after the transfer operation. +/// Prepare a transfer's destination texture. +/// +/// Ensure the destination texture of a transfer is in the right initialization +/// state, and record the state for after the transfer operation. fn handle_dst_texture_init( cmd_buf: &mut CommandBuffer, device: &Device, @@ -476,8 +497,10 @@ fn handle_dst_texture_init( .get(destination.texture) .map_err(|_| TransferError::InvalidTexture(destination.texture))?; - // Attention: If we don't write full texture subresources, we need to a full clear first since we don't track subrects. - // This means that in rare cases even a *destination* texture of a transfer may need an immediate texture init. + // Attention: If we don't write full texture subresources, we need to do a full + // clear first since we don't track subrects. This means that in rare cases + // even a *destination* texture of a transfer may need an immediate texture + // init.
let dst_init_kind = if has_copy_partial_init_tracker_coverage( copy_size, destination.mip_level, @@ -517,10 +540,13 @@ impl Global { let hub = A::hub(self); let mut token = Token::root(); + let (device_guard, mut token) = hub.devices.read(&mut token); let (mut cmd_buf_guard, mut token) = hub.command_buffers.write(&mut token); let cmd_buf = CommandBuffer::get_encoder_mut(&mut *cmd_buf_guard, command_encoder_id)?; let (buffer_guard, _) = hub.buffers.read(&mut token); + let device = &device_guard[cmd_buf.device_id.value]; + #[cfg(feature = "trace")] if let Some(ref mut list) = cmd_buf.commands { list.push(TraceCommand::CopyBufferToBuffer { @@ -570,6 +596,26 @@ impl Global { if destination_offset % wgt::COPY_BUFFER_ALIGNMENT != 0 { return Err(TransferError::UnalignedBufferOffset(destination_offset).into()); } + if !device + .downlevel + .flags + .contains(wgt::DownlevelFlags::UNRESTRICTED_INDEX_BUFFER) + && (src_buffer.usage.contains(wgt::BufferUsages::INDEX) + || dst_buffer.usage.contains(wgt::BufferUsages::INDEX)) + { + let forbidden_usages = wgt::BufferUsages::VERTEX + | wgt::BufferUsages::UNIFORM + | wgt::BufferUsages::INDIRECT + | wgt::BufferUsages::STORAGE; + if src_buffer.usage.intersects(forbidden_usages) + || dst_buffer.usage.intersects(forbidden_usages) + { + return Err(TransferError::MissingDownlevelFlags(MissingDownlevelFlags( + wgt::DownlevelFlags::UNRESTRICTED_INDEX_BUFFER, + )) + .into()); + } + } let source_end_offset = source_offset + size; let destination_end_offset = destination_offset + size; @@ -674,7 +720,9 @@ impl Global { let (dst_range, dst_base, _) = extract_texture_selector(destination, copy_size, dst_texture)?; - // Handle texture init *before* dealing with barrier transitions so we have an easier time inserting "immediate-inits" that may be required by prior discards in rare cases. 
+ // Handle texture init *before* dealing with barrier transitions so we + // have an easier time inserting "immediate-inits" that may be required + // by prior discards in rare cases. handle_dst_texture_init(cmd_buf, device, destination, copy_size, &texture_guard)?; let (src_buffer, src_pending) = cmd_buf @@ -801,7 +849,9 @@ impl Global { let (src_range, src_base, _) = extract_texture_selector(source, copy_size, src_texture)?; - // Handle texture init *before* dealing with barrier transitions so we have an easier time inserting "immediate-inits" that may be required by prior discards in rare cases. + // Handle texture init *before* dealing with barrier transitions so we + // have an easier time inserting "immediate-inits" that may be required + // by prior discards in rare cases. handle_src_texture_init(cmd_buf, device, source, copy_size, &texture_guard)?; let src_pending = cmd_buf @@ -988,7 +1038,9 @@ impl Global { return Err(TransferError::MismatchedAspects.into()); } - // Handle texture init *before* dealing with barrier transitions so we have an easier time inserting "immediate-inits" that may be required by prior discards in rare cases. + // Handle texture init *before* dealing with barrier transitions so we + // have an easier time inserting "immediate-inits" that may be required + // by prior discards in rare cases. handle_src_texture_init(cmd_buf, device, source, copy_size, &texture_guard)?; handle_dst_texture_init(cmd_buf, device, destination, copy_size, &texture_guard)?; diff --git a/wgpu-core/src/device/life.rs b/wgpu-core/src/device/life.rs index c06349dc4f..605aea3dab 100644 --- a/wgpu-core/src/device/life.rs +++ b/wgpu-core/src/device/life.rs @@ -132,61 +132,61 @@ impl NonReferencedResources { if !self.buffers.is_empty() { profiling::scope!("destroy_buffers"); for raw in self.buffers.drain(..) 
{ - device.destroy_buffer(raw); + unsafe { device.destroy_buffer(raw) }; } } if !self.textures.is_empty() { profiling::scope!("destroy_textures"); for raw in self.textures.drain(..) { - device.destroy_texture(raw); + unsafe { device.destroy_texture(raw) }; } } if !self.texture_views.is_empty() { profiling::scope!("destroy_texture_views"); for raw in self.texture_views.drain(..) { - device.destroy_texture_view(raw); + unsafe { device.destroy_texture_view(raw) }; } } if !self.samplers.is_empty() { profiling::scope!("destroy_samplers"); for raw in self.samplers.drain(..) { - device.destroy_sampler(raw); + unsafe { device.destroy_sampler(raw) }; } } if !self.bind_groups.is_empty() { profiling::scope!("destroy_bind_groups"); for raw in self.bind_groups.drain(..) { - device.destroy_bind_group(raw); + unsafe { device.destroy_bind_group(raw) }; } } if !self.compute_pipes.is_empty() { profiling::scope!("destroy_compute_pipelines"); for raw in self.compute_pipes.drain(..) { - device.destroy_compute_pipeline(raw); + unsafe { device.destroy_compute_pipeline(raw) }; } } if !self.render_pipes.is_empty() { profiling::scope!("destroy_render_pipelines"); for raw in self.render_pipes.drain(..) { - device.destroy_render_pipeline(raw); + unsafe { device.destroy_render_pipeline(raw) }; } } if !self.bind_group_layouts.is_empty() { profiling::scope!("destroy_bind_group_layouts"); for raw in self.bind_group_layouts.drain(..) { - device.destroy_bind_group_layout(raw); + unsafe { device.destroy_bind_group_layout(raw) }; } } if !self.pipeline_layouts.is_empty() { profiling::scope!("destroy_pipeline_layouts"); for raw in self.pipeline_layouts.drain(..) { - device.destroy_pipeline_layout(raw); + unsafe { device.destroy_pipeline_layout(raw) }; } } if !self.query_sets.is_empty() { profiling::scope!("destroy_query_sets"); for raw in self.query_sets.drain(..) 
{ - device.destroy_query_set(raw); + unsafe { device.destroy_query_set(raw) }; } } } @@ -887,11 +887,11 @@ impl LifetimeTracker { range: mapping.range.start..mapping.range.start + size, host, }; - resource::BufferMapAsyncStatus::Success + Ok(()) } Err(e) => { log::error!("Mapping failed {:?}", e); - resource::BufferMapAsyncStatus::Error + Err(e) } } } else { @@ -900,7 +900,7 @@ impl LifetimeTracker { range: mapping.range, host: mapping.op.host, }; - resource::BufferMapAsyncStatus::Success + Ok(()) }; pending_callbacks.push((mapping.op, status)); } diff --git a/wgpu-core/src/device/mod.rs b/wgpu-core/src/device/mod.rs index 8f3e5ce4d3..86c79a9a6d 100644 --- a/wgpu-core/src/device/mod.rs +++ b/wgpu-core/src/device/mod.rs @@ -9,8 +9,8 @@ use crate::{ }, instance::{self, Adapter, Surface}, pipeline, present, - resource::{self, BufferMapState}, - resource::{BufferAccessError, BufferMapAsyncStatus, BufferMapOperation}, + resource::{self, BufferAccessResult, BufferMapState}, + resource::{BufferAccessError, BufferMapOperation}, track::{BindGroupStates, TextureSelector, Tracker}, validation::{self, check_buffer_usage, check_texture_usage}, FastHashMap, Label, LabelHelpers as _, LifeGuard, MultiRefCount, RefCount, Stored, @@ -32,7 +32,8 @@ pub mod queue; pub mod trace; pub const SHADER_STAGE_COUNT: usize = 3; -// Should be large enough for the largest possible texture row. This value is enough for a 16k texture with float4 format. +// Should be large enough for the largest possible texture row. This +// value is enough for a 16k texture with float4 format. 
pub(crate) const ZERO_BUFFER_SIZE: BufferAddress = 512 << 10; const CLEANUP_WAIT_MS: u32 = 5000; @@ -79,10 +80,7 @@ pub(crate) struct RenderPassContext { #[derive(Clone, Debug, Error)] pub enum RenderPassCompatibilityError { #[error("Incompatible color attachment: the renderpass expected {0:?} but was given {1:?}")] - IncompatibleColorAttachment( - ArrayVec, { hal::MAX_COLOR_ATTACHMENTS }>, - ArrayVec, { hal::MAX_COLOR_ATTACHMENTS }>, - ), + IncompatibleColorAttachment(Vec>, Vec>), #[error( "Incompatible depth-stencil attachment: the renderpass expected {0:?} but was given {1:?}" )] @@ -101,8 +99,8 @@ impl RenderPassContext { ) -> Result<(), RenderPassCompatibilityError> { if self.attachments.colors != other.attachments.colors { return Err(RenderPassCompatibilityError::IncompatibleColorAttachment( - self.attachments.colors.clone(), - other.attachments.colors.clone(), + self.attachments.colors.iter().cloned().collect(), + other.attachments.colors.iter().cloned().collect(), )); } if self.attachments.depth_stencil != other.attachments.depth_stencil { @@ -129,7 +127,7 @@ impl RenderPassContext { } } -pub type BufferMapPendingClosure = (BufferMapOperation, BufferMapAsyncStatus); +pub type BufferMapPendingClosure = (BufferMapOperation, BufferAccessResult); #[derive(Default)] pub struct UserClosures { @@ -181,19 +179,27 @@ fn map_buffer( assert_eq!(offset % wgt::COPY_BUFFER_ALIGNMENT, 0); assert_eq!(size % wgt::COPY_BUFFER_ALIGNMENT, 0); - // Zero out uninitialized parts of the mapping. (Spec dictates all resources behave as if they were initialized with zero) + // Zero out uninitialized parts of the mapping. (Spec dictates all resources + // behave as if they were initialized with zero) // - // If this is a read mapping, ideally we would use a `clear_buffer` command before reading the data from GPU (i.e. `invalidate_range`). 
- However, this would require us to kick off and wait for a command buffer or piggy back on an existing one (the later is likely the only worthwhile option). - As reading uninitialized memory isn't a particular important path to support, - we instead just initialize the memory here and make sure it is GPU visible, so this happens at max only once for every buffer region. + // If this is a read mapping, ideally we would use a `clear_buffer` command + // before reading the data from GPU (i.e. `invalidate_range`). However, this + // would require us to kick off and wait for a command buffer or piggy back + // on an existing one (the latter is likely the only worthwhile option). As + // reading uninitialized memory isn't a particularly important path to + // support, we instead just initialize the memory here and make sure it is + // GPU visible, so this happens at max only once for every buffer region. // - // If this is a write mapping zeroing out the memory here is the only reasonable way as all data is pushed to GPU anyways. - let zero_init_needs_flush_now = mapping.is_coherent && buffer.sync_mapped_writes.is_none(); // No need to flush if it is flushed later anyways. + // If this is a write mapping zeroing out the memory here is the only + // reasonable way as all data is pushed to GPU anyways. + + // No need to flush if it is flushed later anyways. + let zero_init_needs_flush_now = mapping.is_coherent && buffer.sync_mapped_writes.is_none(); let mapped = unsafe { std::slice::from_raw_parts_mut(mapping.ptr.as_ptr(), size as usize) }; for uninitialized in buffer.initialization_status.drain(offset..(size + offset)) { - // The mapping's pointer is already offset, however we track the uninitialized range relative to the buffer's start. + // The mapping's pointer is already offset, however we track the + // uninitialized range relative to the buffer's start.
let fill_range = (uninitialized.start - offset) as usize..(uninitialized.end - offset) as usize; mapped[fill_range].fill(0); @@ -243,6 +249,7 @@ impl CommandAllocator { /// Structure describing a logical device. Some members are internally mutable, /// stored behind mutexes. +/// /// TODO: establish clear order of locking for these: /// `mem_allocator`, `desc_allocator`, `life_tracker`, `trackers`, /// `render_passes`, `pending_writes`, `trace`. @@ -286,8 +293,9 @@ pub struct Device { pub(crate) limits: wgt::Limits, pub(crate) features: wgt::Features, pub(crate) downlevel: wgt::DownlevelCapabilities, - //TODO: move this behind another mutex. This would allow several methods to switch - // to borrow Device immutably, such as `write_buffer`, `write_texture`, and `buffer_unmap`. + // TODO: move this behind another mutex. This would allow several methods to + // switch to borrow Device immutably, such as `write_buffer`, `write_texture`, + // and `buffer_unmap`. pending_writes: queue::PendingWrites, #[cfg(feature = "trace")] pub(crate) trace: Option>, @@ -579,10 +587,21 @@ impl Device { }); } + if desc.usage.contains(wgt::BufferUsages::INDEX) + && desc.usage.contains( + wgt::BufferUsages::VERTEX + | wgt::BufferUsages::UNIFORM + | wgt::BufferUsages::INDIRECT + | wgt::BufferUsages::STORAGE, + ) + { + self.require_downlevel_flags(wgt::DownlevelFlags::UNRESTRICTED_INDEX_BUFFER)?; + } + let mut usage = conv::map_buffer_usage(desc.usage); - if desc.usage.is_empty() { - return Err(resource::CreateBufferError::EmptyUsage); + if desc.usage.is_empty() || desc.usage.contains_invalid_bits() { + return Err(resource::CreateBufferError::InvalidUsage(desc.usage)); } if !self @@ -608,15 +627,16 @@ impl Device { usage |= hal::BufferUses::COPY_DST; } } else { - // We are required to zero out (initialize) all memory. - // This is done on demand using clear_buffer which requires write transfer usage! + // We are required to zero out (initialize) all memory. 
This is done + // on demand using clear_buffer which requires write transfer usage! usage |= hal::BufferUses::COPY_DST; } let actual_size = if desc.size == 0 { wgt::COPY_BUFFER_ALIGNMENT } else if desc.usage.contains(wgt::BufferUsages::VERTEX) { - // Bumping the size by 1 so that we can bind an empty range at the end of the buffer. + // Bumping the size by 1 so that we can bind an empty range at the + // end of the buffer. desc.size + 1 } else { desc.size @@ -697,8 +717,8 @@ impl Device { ) -> Result, resource::CreateTextureError> { use resource::{CreateTextureError, TextureDimensionError}; - if desc.usage.is_empty() { - return Err(CreateTextureError::EmptyUsage); + if desc.usage.is_empty() || desc.usage.contains_invalid_bits() { + return Err(CreateTextureError::InvalidUsage(desc.usage)); } conv::check_texture_dimension_size( @@ -788,12 +808,23 @@ impl Device { return Err(CreateTextureError::MultisampledNotRenderAttachment); } + if !format_features.flags.intersects( + wgt::TextureFormatFeatureFlags::MULTISAMPLE_X4 + | wgt::TextureFormatFeatureFlags::MULTISAMPLE_X2 + | wgt::TextureFormatFeatureFlags::MULTISAMPLE_X8, + ) { + return Err(CreateTextureError::InvalidMultisampledFormat(desc.format)); + } + if !format_features .flags - .contains(wgt::TextureFormatFeatureFlags::MULTISAMPLE) + .sample_count_supported(desc.sample_count) { - return Err(CreateTextureError::InvalidMultisampledFormat(desc.format)); - } + return Err(CreateTextureError::InvalidSampleCount( + desc.sample_count, + desc.format, + )); + }; } let mips = desc.mip_level_count; @@ -823,7 +854,8 @@ impl Device { // TODO: validate missing TextureDescriptor::view_formats. - // Enforce having COPY_DST/DEPTH_STENCIL_WRIT/COLOR_TARGET otherwise we wouldn't be able to initialize the texture. + // Enforce having COPY_DST/DEPTH_STENCIL_WRIT/COLOR_TARGET otherwise we + // wouldn't be able to initialize the texture. 
let hal_usage = conv::map_texture_usage(desc.usage, desc.format.into()) | if format_desc.sample_type == wgt::TextureSampleType::Depth { hal::TextureUses::DEPTH_STENCIL_WRITE @@ -1210,7 +1242,7 @@ impl Device { pipeline::CreateShaderModuleError::Parsing(pipeline::ShaderError { source: code.to_string(), label: desc.label.as_ref().map(|l| l.to_string()), - inner, + inner: Box::new(inner), }) })?; (Cow::Owned(module), code.into_owned()) @@ -1273,7 +1305,7 @@ impl Device { pipeline::CreateShaderModuleError::Validation(pipeline::ShaderError { source, label: desc.label.as_ref().map(|l| l.to_string()), - inner, + inner: Box::new(inner), }) })?; let interface = @@ -1528,6 +1560,13 @@ impl Device { error, })?; } + + if entry.visibility.contains_invalid_bits() { + return Err( + binding_model::CreateBindGroupLayoutError::InvalidVisibility(entry.visibility), + ); + } + if entry.visibility.contains(wgt::ShaderStages::VERTEX) { if writable_storage == WritableStorage::Yes { required_features |= wgt::Features::VERTEX_WRITABLE_STORAGE; @@ -1579,8 +1618,8 @@ impl Device { for entry in entry_map.values() { count_validator.add_binding(entry); } - // If a single bind group layout violates limits, the pipeline layout is definitely - // going to violate limits too, lets catch it now. + // If a single bind group layout violates limits, the pipeline layout is + // definitely going to violate limits too, lets catch it now. count_validator .validate(&self.limits) .map_err(binding_model::CreateBindGroupLayoutError::TooManyBindings)?; @@ -2093,8 +2132,10 @@ impl Device { (Tst::Float { filterable: true }, Tst::Float { filterable: true }) | // if we expect float, also accept depth (Tst::Float { .. }, Tst::Depth, ..) 
=> {} - // if we expect filterable, also accept Float that is defined as unfilterable if filterable feature is explicitly enabled - // (only hit if wgt::Features::TEXTURE_ADAPTER_SPECIFIC_FORMAT_FEATURES is enabled) + // if we expect filterable, also accept Float that is defined as + // unfilterable if filterable feature is explicitly enabled (only hit + // if wgt::Features::TEXTURE_ADAPTER_SPECIFIC_FORMAT_FEATURES is + // enabled) (Tst::Float { filterable: true }, Tst::Float { .. }) if view.format_features.flags.contains(wgt::TextureFormatFeatureFlags::FILTERABLE) => {} _ => { return Err(Error::InvalidTextureSampleType { @@ -2616,6 +2657,10 @@ impl Device { for (i, cs) in color_targets.iter().enumerate() { if let Some(cs) = cs.as_ref() { let error = loop { + if cs.write_mask.contains_invalid_bits() { + break Some(pipeline::ColorStateError::InvalidWriteMask(cs.write_mask)); + } + let format_features = self.describe_format_features(adapter, cs.format)?; if !format_features .allowed_usages @@ -2628,8 +2673,10 @@ impl Device { let adapter_specific = self .features .contains(wgt::Features::TEXTURE_ADAPTER_SPECIFIC_FORMAT_FEATURES); - // according to WebGPU specifications the texture needs to be [`TextureFormatFeatureFlags::FILTERABLE`] - // if blending is set - use [`Features::TEXTURE_ADAPTER_SPECIFIC_FORMAT_FEATURES`] to elude this limitation + // according to WebGPU specifications the texture needs to be + // [`TextureFormatFeatureFlags::FILTERABLE`] if blending is set - use + // [`Features::TEXTURE_ADAPTER_SPECIFIC_FORMAT_FEATURES`] to elude + // this limitation if cs.blend.is_some() && (!blendable || (!filterable && !adapter_specific)) { break Some(pipeline::ColorStateError::FormatNotBlendable(cs.format)); } @@ -2637,7 +2684,9 @@ impl Device { break Some(pipeline::ColorStateError::FormatNotColor(cs.format)); } if desc.multisample.count > 1 - && !format_features.flags.contains(Tfff::MULTISAMPLE) + && !format_features + .flags + 
.sample_count_supported(desc.multisample.count) { break Some(pipeline::ColorStateError::FormatNotMultisampled(cs.format)); } @@ -2671,7 +2720,10 @@ impl Device { ds.format, )); } - if desc.multisample.count > 1 && !format_features.flags.contains(Tfff::MULTISAMPLE) + if desc.multisample.count > 1 + && !format_features + .flags + .sample_count_supported(desc.multisample.count) { break Some(pipeline::DepthStencilStateError::FormatNotMultisampled( ds.format, @@ -2959,7 +3011,8 @@ impl Device { let using_device_features = self .features .contains(wgt::Features::TEXTURE_ADAPTER_SPECIFIC_FORMAT_FEATURES); - // If we're running downlevel, we need to manually ask the backend what we can use as we can't trust WebGPU. + // If we're running downlevel, we need to manually ask the backend what + // we can use as we can't trust WebGPU. let downlevel = !self.downlevel.is_webgpu_compliant(); if using_device_features || downlevel { @@ -3172,43 +3225,21 @@ impl Global { Ok(adapter.is_surface_supported(surface)) } - pub fn surface_get_supported_formats( - &self, - surface_id: id::SurfaceId, - adapter_id: id::AdapterId, - ) -> Result, instance::GetSurfaceSupportError> { - profiling::scope!("Surface::get_supported_formats"); - self.fetch_adapter_and_surface::>( - surface_id, - adapter_id, - |adapter, surface| surface.get_supported_formats(adapter), - ) - } - - pub fn surface_get_supported_present_modes( - &self, - surface_id: id::SurfaceId, - adapter_id: id::AdapterId, - ) -> Result, instance::GetSurfaceSupportError> { - profiling::scope!("Surface::get_supported_present_modes"); - self.fetch_adapter_and_surface::>( - surface_id, - adapter_id, - |adapter, surface| surface.get_supported_present_modes(adapter), - ) - } - - pub fn surface_get_supported_alpha_modes( + pub fn surface_get_capabilities( &self, surface_id: id::SurfaceId, adapter_id: id::AdapterId, - ) -> Result, instance::GetSurfaceSupportError> { - profiling::scope!("Surface::get_supported_alpha_modes"); - 
self.fetch_adapter_and_surface::>( - surface_id, - adapter_id, - |adapter, surface| surface.get_supported_alpha_modes(adapter), - ) + ) -> Result { + profiling::scope!("Surface::get_capabilities"); + self.fetch_adapter_and_surface::(surface_id, adapter_id, |adapter, surface| { + let hal_caps = surface.get_capabilities(adapter)?; + + Ok(wgt::SurfaceCapabilities { + formats: hal_caps.formats, + present_modes: hal_caps.present_modes, + alpha_modes: hal_caps.composite_alpha_modes, + }) + }) } fn fetch_adapter_and_surface< @@ -3480,7 +3511,7 @@ impl Global { buffer_id: id::BufferId, offset: BufferAddress, data: &[u8], - ) -> Result<(), BufferAccessError> { + ) -> BufferAccessResult { profiling::scope!("Device::set_buffer_sub_data"); let hub = A::hub(self); @@ -3537,7 +3568,7 @@ impl Global { buffer_id: id::BufferId, offset: BufferAddress, data: &mut [u8], - ) -> Result<(), BufferAccessError> { + ) -> BufferAccessResult { profiling::scope!("Device::get_buffer_sub_data"); let hub = A::hub(self); @@ -3767,7 +3798,8 @@ impl Global { Err(_) => break DeviceError::Invalid.into(), }; - // NB: Any change done through the raw texture handle will not be recorded in the replay + // NB: Any change done through the raw texture handle will not be + // recorded in the replay #[cfg(feature = "trace")] if let Some(ref trace) = device.trace { trace @@ -4432,7 +4464,8 @@ impl Global { (id, Some(error)) } - #[allow(unused_unsafe)] // Unsafe-ness of internal calls has little to do with unsafe-ness of this. + // Unsafe-ness of internal calls has little to do with unsafe-ness of this. 
+ #[allow(unused_unsafe)] /// # Safety /// /// This function passes SPIR-V binary to the backend as-is and can potentially result in a @@ -4472,10 +4505,11 @@ impl Global { }); }; - let shader = match device.create_shader_module_spirv(device_id, desc, &source) { - Ok(shader) => shader, - Err(e) => break e, - }; + let shader = + match unsafe { device.create_shader_module_spirv(device_id, desc, &source) } { + Ok(shader) => shader, + Err(e) => break e, + }; let id = fid.assign(shader, &mut token); return (id.0, None); }; @@ -5127,7 +5161,7 @@ impl Global { .composite_alpha_modes .contains(&config.composite_alpha_mode) { - let new_alpha_mode = 'b: loop { + let new_alpha_mode = 'alpha: loop { // Automatic alpha mode checks. let fallbacks = match config.composite_alpha_mode { wgt::CompositeAlphaMode::Auto => &[ @@ -5144,11 +5178,15 @@ impl Global { for &fallback in fallbacks { if caps.composite_alpha_modes.contains(&fallback) { - break 'b fallback; + break 'alpha fallback; } } - unreachable!("Fallback system failed to choose alpha mode. This is a bug. AlphaMode: {:?}, Options: {:?}", config.composite_alpha_mode, &caps.composite_alpha_modes); + unreachable!( + "Fallback system failed to choose alpha mode. This is a bug. \ + AlphaMode: {:?}, Options: {:?}", + config.composite_alpha_mode, &caps.composite_alpha_modes + ); }; log::info!( @@ -5318,7 +5356,8 @@ impl Global { /// /// If `force_wait` is true, block until all buffer mappings are done. /// - /// Return `all_queue_empty` indicating whether there are more queue submissions still in flight. + /// Return `all_queue_empty` indicating whether there are more queue + /// submissions still in flight. fn poll_devices( &self, force_wait: bool, @@ -5362,7 +5401,8 @@ impl Global { /// /// This is the implementation of `wgpu::Instance::poll_all`. /// - /// Return `all_queue_empty` indicating whether there are more queue submissions still in flight. 
+ /// Return `all_queue_empty` indicating whether there are more queue + /// submissions still in flight. pub fn poll_all_devices(&self, force_wait: bool) -> Result { let mut closures = UserClosures::default(); let mut all_queue_empty = true; @@ -5473,33 +5513,12 @@ impl Global { buffer_id: id::BufferId, range: Range, op: BufferMapOperation, - ) -> Result<(), BufferAccessError> { + ) -> BufferAccessResult { // User callbacks must not be called while holding buffer_map_async_inner's locks, so we // defer the error callback if it needs to be called immediately (typically when running // into errors). if let Err((op, err)) = self.buffer_map_async_inner::(buffer_id, range, op) { - let status = match &err { - &BufferAccessError::Device(_) => BufferMapAsyncStatus::ContextLost, - &BufferAccessError::Invalid | &BufferAccessError::Destroyed => { - BufferMapAsyncStatus::Invalid - } - &BufferAccessError::AlreadyMapped => BufferMapAsyncStatus::AlreadyMapped, - &BufferAccessError::MapAlreadyPending => BufferMapAsyncStatus::MapAlreadyPending, - &BufferAccessError::MissingBufferUsage(_) => { - BufferMapAsyncStatus::InvalidUsageFlags - } - &BufferAccessError::UnalignedRange - | &BufferAccessError::UnalignedRangeSize { .. } - | &BufferAccessError::UnalignedOffset { .. } => { - BufferMapAsyncStatus::InvalidAlignment - } - &BufferAccessError::OutOfBoundsUnderrun { .. } - | &BufferAccessError::OutOfBoundsOverrun { .. } - | &BufferAccessError::NegativeRange { .. } => BufferMapAsyncStatus::InvalidRange, - _ => BufferMapAsyncStatus::Error, - }; - - op.callback.call(status); + op.callback.call(Err(err.clone())); return Err(err); } @@ -5658,7 +5677,10 @@ impl Global { max: range.end, }); } - unsafe { Ok((ptr.as_ptr().offset(offset as isize), range_size)) } + // ptr points to the beginning of the range we mapped in map_async + // rather thant the beginning of the buffer. 
+ let relative_offset = (offset - range.start) as isize; + unsafe { Ok((ptr.as_ptr().offset(relative_offset), range_size)) } } resource::BufferMapState::Idle | resource::BufferMapState::Waiting(_) => { Err(BufferAccessError::NotMapped) @@ -5735,7 +5757,7 @@ impl Global { return Err(BufferAccessError::NotMapped); } resource::BufferMapState::Waiting(pending) => { - return Ok(Some((pending.op, resource::BufferMapAsyncStatus::Aborted))); + return Ok(Some((pending.op, Err(BufferAccessError::MapAborted)))); } resource::BufferMapState::Active { ptr, range, host } => { if host == HostMap::Write { @@ -5766,10 +5788,7 @@ impl Global { Ok(None) } - pub fn buffer_unmap( - &self, - buffer_id: id::BufferId, - ) -> Result<(), BufferAccessError> { + pub fn buffer_unmap(&self, buffer_id: id::BufferId) -> BufferAccessResult { profiling::scope!("unmap", "Buffer"); let closure; diff --git a/wgpu-core/src/device/queue.rs b/wgpu-core/src/device/queue.rs index c77bd2d6e9..cf8c721b85 100644 --- a/wgpu-core/src/device/queue.rs +++ b/wgpu-core/src/device/queue.rs @@ -60,8 +60,11 @@ impl SubmittedWorkDoneClosure { /// # Safety /// - /// - The callback pointer must be valid to call with the provided user_data pointer. - /// - Both pointers must point to 'static data as the callback may happen at an unspecified time. + /// - The callback pointer must be valid to call with the provided `user_data` + /// pointer. + /// + /// - Both pointers must point to `'static` data, as the callback may happen at + /// an unspecified time. pub unsafe fn from_c(inner: SubmittedWorkDoneClosureC) -> Self { Self { inner: SubmittedWorkDoneClosureInner::C { inner }, @@ -113,22 +116,25 @@ pub(super) struct EncoderInFlight { impl EncoderInFlight { pub(super) unsafe fn land(mut self) -> A::CommandEncoder { - self.raw.reset_all(self.cmd_buffers.into_iter()); + unsafe { self.raw.reset_all(self.cmd_buffers.into_iter()) }; self.raw } } -/// Writes made directly on the device or queue, not as part of a wgpu command buffer. 
+/// A private command encoder for writes made directly on the device +/// or queue. /// /// Operations like `buffer_unmap`, `queue_write_buffer`, and -/// `queue_write_texture` need to copy data to the GPU. This must be -/// done by encoding and submitting commands at the hal level, but these -/// operations are not associated with any specific wgpu command buffer. +/// `queue_write_texture` need to copy data to the GPU. At the hal +/// level, this must be done by encoding and submitting commands, but +/// these operations are not associated with any specific wgpu command +/// buffer. /// -/// Instead, `Device::pending_writes` owns one of these values, which has its -/// own hal command encoder and resource lists. The commands accumulated here -/// are automatically submitted to the queue the next time the user submits a -/// wgpu command buffer, ahead of the user's commands. +/// Instead, `Device::pending_writes` owns one of these values, which +/// has its own hal command encoder and resource lists. The commands +/// accumulated here are automatically submitted to the queue the next +/// time the user submits a wgpu command buffer, ahead of the user's +/// commands. /// /// All uses of [`StagingBuffer`]s end up here. #[derive(Debug)] @@ -270,9 +276,9 @@ fn prepare_staging_buffer( impl StagingBuffer { unsafe fn flush(&self, device: &A::Device) -> Result<(), DeviceError> { if !self.is_coherent { - device.flush_mapped_ranges(&self.raw, iter::once(0..self.size)); + unsafe { device.flush_mapped_ranges(&self.raw, iter::once(0..self.size)) }; } - device.unmap_buffer(&self.raw)?; + unsafe { device.unmap_buffer(&self.raw)? }; Ok(()) } } @@ -540,7 +546,8 @@ impl Global { device.pending_writes.dst_buffers.insert(buffer_id); - // Ensure the overwritten bytes are marked as initialized so they don't need to be nulled prior to mapping or binding. + // Ensure the overwritten bytes are marked as initialized so + // they don't need to be nulled prior to mapping or binding. 
{ drop(buffer_guard); let mut buffer_guard = hub.buffers.write(device_token).0; @@ -593,8 +600,21 @@ impl Global { let (selector, dst_base, texture_format) = extract_texture_selector(destination, size, dst)?; let format_desc = texture_format.describe(); - //Note: `_source_bytes_per_array_layer` is ignored since we have a staging copy, - // and it can have a different value. + + let dst = texture_guard.get_mut(destination.texture).unwrap(); + if !dst.desc.usage.contains(wgt::TextureUsages::COPY_DST) { + return Err( + TransferError::MissingCopyDstUsageFlag(None, Some(destination.texture)).into(), + ); + } + + // Note: Doing the copy range validation early is important because ensures that the + // dimensions are not going to cause overflow in other parts of the validation. + let (hal_copy_size, array_layer_count) = + validate_texture_copy_range(destination, &dst.desc, CopySide::Destination, size)?; + + // Note: `_source_bytes_per_array_layer` is ignored since we + // have a staging copy, and it can have a different value. let (_, _source_bytes_per_array_layer) = validate_linear_texture_data( data_layout, texture_format, @@ -615,7 +635,9 @@ impl Global { let block_rows_per_image = match data_layout.rows_per_image { Some(rows_per_image) => rows_per_image.get(), None => { - // doesn't really matter because we need this only if we copy more than one layer, and then we validate for this being not None + // doesn't really matter because we need this only if we copy + // more than one layer, and then we validate for this being not + // None size.height } }; @@ -642,11 +664,14 @@ impl Global { let mut trackers = device.trackers.lock(); let encoder = device.pending_writes.activate(); - // If the copy does not fully cover the layers, we need to initialize to zero *first* as we don't keep track of partial texture layer inits. - // Strictly speaking we only need to clear the areas of a layer untouched, but this would get increasingly messy. 
- + // If the copy does not fully cover the layers, we need to initialize to + // zero *first* as we don't keep track of partial texture layer inits. + // + // Strictly speaking we only need to clear the areas of a layer + // untouched, but this would get increasingly messy. let init_layer_range = if dst.desc.dimension == wgt::TextureDimension::D3 { - 0..1 // volume textures don't have a layer range as array volumes aren't supported + // volume textures don't have a layer range as array volumes aren't supported + 0..1 } else { destination.origin.z..destination.origin.z + size.depth_or_array_layers }; @@ -690,8 +715,6 @@ impl Global { ) .ok_or(TransferError::InvalidTexture(destination.texture))?; - let (hal_copy_size, array_layer_count) = - validate_texture_copy_range(destination, &dst.desc, CopySide::Destination, size)?; dst.life_guard.use_at(device.active_submission_index + 1); let dst_raw = dst @@ -832,7 +855,8 @@ impl Global { // finish all the command buffers first for &cmb_id in command_buffer_ids { - // we reset the used surface textures every time we use it, so make sure to set_size on it. + // we reset the used surface textures every time we use + // it, so make sure to set_size on it. used_surface_textures.set_size(texture_guard.len()); #[allow(unused_mut)] @@ -1036,12 +1060,14 @@ impl Global { } = *device; { - //TODO: these blocks have a few organizational issues and should be refactored - // (1) it's similar to the code we have per-command-buffer (at the begin and end) - // Maybe we an merge some? - // (2) it's doing the extra locking unconditionally - // Maybe we can only do so if any surfaces are being written to? - + // TODO: These blocks have a few organizational issues, and + // should be refactored. + // + // 1) It's similar to the code we have per-command-buffer + // (at the begin and end) Maybe we can merge some? + // + // 2) It's doing the extra locking unconditionally. Maybe we + // can only do so if any surfaces are being written to? 
let (_, mut token) = hub.buffers.read(&mut token); // skip token let (mut texture_guard, _) = hub.textures.write(&mut token); diff --git a/wgpu-core/src/hub.rs b/wgpu-core/src/hub.rs index 2c586c930e..f07010477c 100644 --- a/wgpu-core/src/hub.rs +++ b/wgpu-core/src/hub.rs @@ -316,8 +316,9 @@ impl Storage { /// /// Returns [`None`] if there is an epoch mismatch, or the entry is empty. /// - /// This function is primarily intended for the `as_hal` family of functions where you may need to - /// fallibly get a object backed by an id that could be in a different hub. + /// This function is primarily intended for the `as_hal` family of functions + /// where you may need to fallibly get a object backed by an id that could + /// be in a different hub. pub(crate) fn try_get(&self, id: I) -> Result, InvalidId> { let (index, epoch, _) = id.unzip(); let (result, storage_epoch) = match self.map.get(index as usize) { @@ -1155,7 +1156,7 @@ impl Global { let mut surface_guard = self.surfaces.data.write(); let hub = A::hub(self); // this is used for tests, which keep the adapter - hub.clear(&mut *surface_guard, false); + hub.clear(&mut surface_guard, false); } pub fn generate_report(&self) -> GlobalReport { @@ -1204,23 +1205,23 @@ impl Drop for Global { // destroy hubs before the instance gets dropped #[cfg(vulkan)] { - self.hubs.vulkan.clear(&mut *surface_guard, true); + self.hubs.vulkan.clear(&mut surface_guard, true); } #[cfg(metal)] { - self.hubs.metal.clear(&mut *surface_guard, true); + self.hubs.metal.clear(&mut surface_guard, true); } #[cfg(dx12)] { - self.hubs.dx12.clear(&mut *surface_guard, true); + self.hubs.dx12.clear(&mut surface_guard, true); } #[cfg(dx11)] { - self.hubs.dx11.clear(&mut *surface_guard, true); + self.hubs.dx11.clear(&mut surface_guard, true); } #[cfg(gl)] { - self.hubs.gl.clear(&mut *surface_guard, true); + self.hubs.gl.clear(&mut surface_guard, true); } // destroy surfaces diff --git a/wgpu-core/src/id.rs b/wgpu-core/src/id.rs index 
3f5ec0e3a0..b31e7b4c48 100644 --- a/wgpu-core/src/id.rs +++ b/wgpu-core/src/id.rs @@ -167,6 +167,7 @@ pub(crate) struct Valid(pub I); pub trait TypedId: Copy { fn zip(index: Index, epoch: Epoch, backend: Backend) -> Self; fn unzip(self) -> (Index, Epoch, Backend); + fn into_raw(self) -> NonZeroId; } #[allow(trivial_numeric_casts)] @@ -187,6 +188,10 @@ impl TypedId for Id { self.backend(), ) } + + fn into_raw(self) -> NonZeroId { + self.0 + } } pub type AdapterId = Id>; diff --git a/wgpu-core/src/init_tracker/buffer.rs b/wgpu-core/src/init_tracker/buffer.rs index 541d0aefd0..ea9b9f6a8d 100644 --- a/wgpu-core/src/init_tracker/buffer.rs +++ b/wgpu-core/src/init_tracker/buffer.rs @@ -12,7 +12,8 @@ pub(crate) struct BufferInitTrackerAction { pub(crate) type BufferInitTracker = InitTracker; impl BufferInitTracker { - /// Checks if an action has/requires any effect on the initialization status and shrinks its range if possible. + /// Checks if an action has/requires any effect on the initialization status + /// and shrinks its range if possible. pub(crate) fn check_action( &self, action: &BufferInitTrackerAction, @@ -20,7 +21,8 @@ impl BufferInitTracker { self.create_action(action.id, action.range.clone(), action.kind) } - /// Creates an action if it would have any effect on the initialization status and shrinks the range if possible. + /// Creates an action if it would have any effect on the initialization + /// status and shrinks the range if possible. pub(crate) fn create_action( &self, id: BufferId, diff --git a/wgpu-core/src/init_tracker/mod.rs b/wgpu-core/src/init_tracker/mod.rs index 1111735f6b..c7e78bc993 100644 --- a/wgpu-core/src/init_tracker/mod.rs +++ b/wgpu-core/src/init_tracker/mod.rs @@ -1,17 +1,35 @@ -// WebGPU specification requires all texture & buffer memory to be zero initialized on first read. -// To avoid unnecessary inits, we track the initialization status of every resource and perform inits lazily. 
-// -// The granularity is different for buffers and textures: -// * Buffer: Byte granularity to support usecases with large, partially bound buffers well. -// * Texture: Mip-level per layer. I.e. a 2D surface is either completely initialized or not, subrects are not tracked. -// -// Every use of a buffer/texture generates a InitTrackerAction which are recorded and later resolved at queue submit by merging them with the current state and each other in execution order. -// It is important to note that from the point of view of the memory init system there are two kind of writes: -// * Full writes: -// Any kind of memcpy operation. These cause a `MemoryInitKind.ImplicitlyInitialized` action. -// * (Potentially) partial writes: -// E.g. write use in a Shader. The system is not able to determine if a resource is fully initialized afterwards but is no longer allowed to perform any clears, -// therefore this leads to a `MemoryInitKind.ImplicitlyInitialized` action, exactly like a read would. +/*! Lazy initialization of texture and buffer memory. + +The WebGPU specification requires all texture & buffer memory to be +zero initialized on first read. To avoid unnecessary inits, we track +the initialization status of every resource and perform inits lazily. + +The granularity is different for buffers and textures: + +- Buffer: Byte granularity to support usecases with large, partially + bound buffers well. + +- Texture: Mip-level per layer. That is, a 2D surface is either + completely initialized or not, subrects are not tracked. + +Every use of a buffer/texture generates a InitTrackerAction which are +recorded and later resolved at queue submit by merging them with the +current state and each other in execution order. + +It is important to note that from the point of view of the memory init +system there are two kind of writes: + +- **Full writes**: Any kind of memcpy operation. These cause a + `MemoryInitKind.ImplicitlyInitialized` action. 
+ +- **(Potentially) partial writes**: For example, write use in a + Shader. The system is not able to determine if a resource is fully + initialized afterwards but is no longer allowed to perform any + clears, therefore this leads to a + `MemoryInitKind.ImplicitlyInitialized` action, exactly like a read + would. + + */ use smallvec::SmallVec; use std::{fmt, iter, ops::Range}; @@ -27,13 +45,16 @@ pub(crate) use texture::{ #[derive(Debug, Clone, Copy)] pub(crate) enum MemoryInitKind { - // The memory range is going to be written by an already initialized source, thus doesn't need extra attention other than marking as initialized. + // The memory range is going to be written by an already initialized source, + // thus doesn't need extra attention other than marking as initialized. ImplicitlyInitialized, - // The memory range is going to be read, therefore needs to ensure prior initialization. + // The memory range is going to be read, therefore needs to ensure prior + // initialization. NeedsInitializedMemory, } -// Most of the time a resource is either fully uninitialized (one element) or initialized (zero elements). +// Most of the time a resource is either fully uninitialized (one element) or +// initialized (zero elements). type UninitializedRangeVec = SmallVec<[Range; 1]>; /// Tracks initialization status of a linear range from 0..size @@ -134,8 +155,10 @@ where } // Checks if there's any uninitialized ranges within a query. - // If there are any, the range returned a the subrange of the query_range that contains all these uninitialized regions. - // Returned range may be larger than necessary (tradeoff for making this function O(log n)) + // + // If there are any, the range returned a the subrange of the query_range + // that contains all these uninitialized regions. 
Returned range may be + // larger than necessary (tradeoff for making this function O(log n)) pub(crate) fn check(&self, query_range: Range) -> Option> { let index = self .uninitialized_ranges @@ -148,7 +171,8 @@ where match self.uninitialized_ranges.get(index + 1) { Some(next_range) => { if next_range.start < query_range.end { - // Would need to keep iterating for more accurate upper bound. Don't do that here. + // Would need to keep iterating for more + // accurate upper bound. Don't do that here. Some(start..query_range.end) } else { Some(start..start_range.end.min(query_range.end)) @@ -248,9 +272,12 @@ mod test { assert_eq!(tracker.check(3..8), Some(5..8)); // left overlapping region assert_eq!(tracker.check(3..17), Some(5..17)); // left overlapping region + contained region - assert_eq!(tracker.check(8..22), Some(8..22)); // right overlapping region + contained region (yes, doesn't fix range end!) - assert_eq!(tracker.check(17..22), Some(17..20)); // right overlapping region - assert_eq!(tracker.check(20..25), None); // right non-overlapping + // right overlapping region + contained region (yes, doesn't fix range end!) + assert_eq!(tracker.check(8..22), Some(8..22)); + // right overlapping region + assert_eq!(tracker.check(17..22), Some(17..20)); + // right non-overlapping + assert_eq!(tracker.check(20..25), None); } #[test] diff --git a/wgpu-core/src/init_tracker/texture.rs b/wgpu-core/src/init_tracker/texture.rs index 87bfcdc887..90167e6fa6 100644 --- a/wgpu-core/src/init_tracker/texture.rs +++ b/wgpu-core/src/init_tracker/texture.rs @@ -6,11 +6,13 @@ use std::ops::Range; #[derive(Debug, Clone)] pub(crate) struct TextureInitRange { pub(crate) mip_range: Range, - pub(crate) layer_range: Range, // Strictly array layers. We do *not* track volume slices separately. + // Strictly array layers. We do *not* track volume slices separately. 
+ pub(crate) layer_range: Range, } -// Returns true if a copy operation doesn't fully cover the texture init tracking granularity. -// I.e. if this function returns true for a pending copy operation, the target texture needs to be ensured to be initialized first! +// Returns true if a copy operation doesn't fully cover the texture init +// tracking granularity. I.e. if this function returns true for a pending copy +// operation, the target texture needs to be ensured to be initialized first! pub(crate) fn has_copy_partial_init_tracker_coverage( copy_size: &wgt::Extent3d, mip_level: u32, diff --git a/wgpu-core/src/instance.rs b/wgpu-core/src/instance.rs index e1e9831dc3..82468881e2 100644 --- a/wgpu-core/src/instance.rs +++ b/wgpu-core/src/instance.rs @@ -8,7 +8,7 @@ use crate::{ use wgt::{Backend, Backends, PowerPreference}; -use hal::{Adapter as _, Instance as _, SurfaceCapabilities}; +use hal::{Adapter as _, Instance as _}; use thiserror::Error; pub type RequestAdapterOptions = wgt::RequestAdapterOptions; @@ -152,37 +152,10 @@ impl crate::hub::Resource for Surface { } impl Surface { - pub fn get_supported_formats( + pub fn get_capabilities( &self, adapter: &Adapter, - ) -> Result, GetSurfaceSupportError> { - self.get_capabilities(adapter).map(|mut caps| { - // TODO: maybe remove once we support texture view changing srgb-ness - caps.formats.sort_by_key(|f| !f.describe().srgb); - caps.formats - }) - } - - pub fn get_supported_present_modes( - &self, - adapter: &Adapter, - ) -> Result, GetSurfaceSupportError> { - self.get_capabilities(adapter) - .map(|caps| caps.present_modes) - } - - pub fn get_supported_alpha_modes( - &self, - adapter: &Adapter, - ) -> Result, GetSurfaceSupportError> { - self.get_capabilities(adapter) - .map(|caps| caps.composite_alpha_modes) - } - - fn get_capabilities( - &self, - adapter: &Adapter, - ) -> Result { + ) -> Result { let suf = A::get_surface(self).ok_or(GetSurfaceSupportError::Unsupported)?; 
profiling::scope!("surface_capabilities"); let caps = unsafe { @@ -268,9 +241,18 @@ impl Adapter { ); flags.set( - wgt::TextureFormatFeatureFlags::MULTISAMPLE, - caps.contains(Tfc::MULTISAMPLE), + wgt::TextureFormatFeatureFlags::MULTISAMPLE_X2, + caps.contains(Tfc::MULTISAMPLE_X2), + ); + flags.set( + wgt::TextureFormatFeatureFlags::MULTISAMPLE_X4, + caps.contains(Tfc::MULTISAMPLE_X4), + ); + flags.set( + wgt::TextureFormatFeatureFlags::MULTISAMPLE_X8, + caps.contains(Tfc::MULTISAMPLE_X8), ); + flags.set( wgt::TextureFormatFeatureFlags::MULTISAMPLE_RESOLVE, caps.contains(Tfc::MULTISAMPLE_RESOLVE), @@ -336,7 +318,10 @@ impl Adapter { .contains(wgt::Features::MAPPABLE_PRIMARY_BUFFERS) && self.raw.info.device_type == wgt::DeviceType::DiscreteGpu { - log::warn!("Feature MAPPABLE_PRIMARY_BUFFERS enabled on a discrete gpu. This is a massive performance footgun and likely not what you wanted"); + log::warn!( + "Feature MAPPABLE_PRIMARY_BUFFERS enabled on a discrete gpu. \ + This is a massive performance footgun and likely not what you wanted" + ); } if let Some(_) = desc.label { @@ -445,10 +430,7 @@ impl Global { ) -> SurfaceId { profiling::scope!("Instance::create_surface"); - //Note: using a dummy argument to work around the following error: - //> cannot provide explicit generic arguments when `impl Trait` is used in argument position fn init( - _: A, inst: &Option, display_handle: raw_window_handle::RawDisplayHandle, window_handle: raw_window_handle::RawWindowHandle, @@ -470,40 +452,15 @@ impl Global { let surface = Surface { presentation: None, #[cfg(vulkan)] - vulkan: init( - hal::api::Vulkan, - &self.instance.vulkan, - display_handle, - window_handle, - ), + vulkan: init::(&self.instance.vulkan, display_handle, window_handle), #[cfg(metal)] - metal: init( - hal::api::Metal, - &self.instance.metal, - display_handle, - window_handle, - ), + metal: init::(&self.instance.metal, display_handle, window_handle), #[cfg(dx12)] - dx12: init( - hal::api::Dx12, - 
&self.instance.dx12, - display_handle, - window_handle, - ), + dx12: init::(&self.instance.dx12, display_handle, window_handle), #[cfg(dx11)] - dx11: init( - hal::api::Dx11, - &self.instance.dx11, - display_handle, - window_handle, - ), + dx11: init::(&self.instance.dx11, display_handle, window_handle), #[cfg(gl)] - gl: init( - hal::api::Gles, - &self.instance.gl, - display_handle, - window_handle, - ), + gl: init::(&self.instance.gl, display_handle, window_handle), }; let mut token = Token::root(); @@ -602,7 +559,7 @@ impl Global { #[cfg(vulkan)] vulkan: None, dx12: self.instance.dx12.as_ref().map(|inst| HalSurface { - raw: { inst.create_surface_from_visual(visual as _) }, + raw: unsafe { inst.create_surface_from_visual(visual as _) }, }), dx11: None, #[cfg(gl)] @@ -731,7 +688,8 @@ impl Global { if let Some(surface) = compatible_surface { let surface = &A::get_surface(surface); adapters.retain(|exposed| unsafe { - // If the surface does not exist for this backend, then the surface is not supported. + // If the surface does not exist for this backend, + // then the surface is not supported. surface.is_some() && exposed .adapter @@ -835,10 +793,13 @@ impl Global { } let preferred_gpu = match desc.power_preference { - // Since devices of type "Other" might really be "Unknown" and come from APIs like OpenGL that don't specify device type, - // Prefer more Specific types over Other. - // This means that backends which do provide accurate device types will be preferred - // if their device type indicates an actual hardware GPU (integrated or discrete). + // Since devices of type "Other" might really be "Unknown" and come + // from APIs like OpenGL that don't specify device type, Prefer more + // Specific types over Other. + // + // This means that backends which do provide accurate device types + // will be preferred if their device type indicates an actual + // hardware GPU (integrated or discrete). 
PowerPreference::LowPower => integrated.or(discrete).or(other).or(virt).or(cpu), PowerPreference::HighPerformance => discrete.or(integrated).or(other).or(virt).or(cpu), }; diff --git a/wgpu-core/src/lib.rs b/wgpu-core/src/lib.rs index 245d16d18c..ce0f7f5087 100644 --- a/wgpu-core/src/lib.rs +++ b/wgpu-core/src/lib.rs @@ -3,6 +3,7 @@ * into other language-specific user-friendly libraries. */ +#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] #![allow( // It is much clearer to assert negative conditions with eq! false clippy::bool_assert_comparison, @@ -27,6 +28,7 @@ #![warn( trivial_casts, trivial_numeric_casts, + unsafe_op_in_unsafe_fn, unused_extern_crates, unused_qualifications, // We don't match on a reference, unless required. @@ -289,8 +291,9 @@ platform supports."; #[macro_export] macro_rules! gfx_select { ($id:expr => $global:ident.$method:ident( $($param:expr),* )) => { - // Note: For some reason the cfg aliases defined in build.rs don't succesfully apply in this - // macro so we must specify their equivalents manually + // Note: For some reason the cfg aliases defined in build.rs + // don't succesfully apply in this macro so we must specify + // their equivalents manually. 
match $id.backend() { #[cfg(any( all(not(target_arch = "wasm32"), not(target_os = "ios"), not(target_os = "macos")), diff --git a/wgpu-core/src/pipeline.rs b/wgpu-core/src/pipeline.rs index 667b8b96ed..cd5a8ffb09 100644 --- a/wgpu-core/src/pipeline.rs +++ b/wgpu-core/src/pipeline.rs @@ -66,7 +66,7 @@ impl Resource for ShaderModule { pub struct ShaderError { pub source: String, pub label: Option, - pub inner: E, + pub inner: Box, } #[cfg(feature = "wgsl")] impl fmt::Display for ShaderError { @@ -303,6 +303,8 @@ pub enum ColorStateError { }, #[error("blend factors for {0:?} must be `One`")] InvalidMinMaxBlendFactors(wgt::BlendComponent), + #[error("invalid write mask {0:?}")] + InvalidWriteMask(wgt::ColorWrites), } #[derive(Clone, Debug, Error)] diff --git a/wgpu-core/src/present.rs b/wgpu-core/src/present.rs index 21047199ad..41da2fa585 100644 --- a/wgpu-core/src/present.rs +++ b/wgpu-core/src/present.rs @@ -184,7 +184,7 @@ impl Global { hal_usage: conv::map_texture_usage(config.usage, config.format.into()), format_features: wgt::TextureFormatFeatures { allowed_usages: wgt::TextureUsages::RENDER_ATTACHMENT, - flags: wgt::TextureFormatFeatureFlags::MULTISAMPLE + flags: wgt::TextureFormatFeatureFlags::MULTISAMPLE_X4 | wgt::TextureFormatFeatureFlags::MULTISAMPLE_RESOLVE, }, initialization_status: TextureInitTracker::new(1, 1), diff --git a/wgpu-core/src/resource.rs b/wgpu-core/src/resource.rs index dd66b48d14..4302df7e61 100644 --- a/wgpu-core/src/resource.rs +++ b/wgpu-core/src/resource.rs @@ -1,5 +1,5 @@ use crate::{ - device::{DeviceError, HostMap, MissingFeatures}, + device::{DeviceError, HostMap, MissingDownlevelFlags, MissingFeatures}, hub::{Global, GlobalIdentityHandlerFactory, HalApi, Resource, Token}, id::{AdapterId, DeviceId, SurfaceId, TextureId, Valid}, init_tracker::{BufferInitTracker, TextureInitTracker}, @@ -15,7 +15,7 @@ use std::{borrow::Borrow, num::NonZeroU8, ops::Range, ptr::NonNull}; /// The status code provided to the buffer mapping callback. 
/// -/// This is very similar to `Result<(), BufferAccessError>`, except that this is FFI-friendly. +/// This is very similar to `BufferAccessResult`, except that this is FFI-friendly. #[repr(C)] #[derive(Debug)] pub enum BufferMapAsyncStatus { @@ -84,7 +84,7 @@ pub struct BufferMapCallback { enum BufferMapCallbackInner { Rust { - callback: Box, + callback: Box, }, C { inner: BufferMapCallbackC, @@ -92,7 +92,7 @@ enum BufferMapCallbackInner { } impl BufferMapCallback { - pub fn from_rust(callback: Box) -> Self { + pub fn from_rust(callback: Box) -> Self { Self { inner: Some(BufferMapCallbackInner::Rust { callback }), } @@ -100,21 +100,50 @@ impl BufferMapCallback { /// # Safety /// - /// - The callback pointer must be valid to call with the provided user_data pointer. - /// - Both pointers must point to valid memory until the callback is invoked, which may happen at an unspecified time. + /// - The callback pointer must be valid to call with the provided user_data + /// pointer. + /// + /// - Both pointers must point to valid memory until the callback is + /// invoked, which may happen at an unspecified time. pub unsafe fn from_c(inner: BufferMapCallbackC) -> Self { Self { inner: Some(BufferMapCallbackInner::C { inner }), } } - pub(crate) fn call(mut self, status: BufferMapAsyncStatus) { + pub(crate) fn call(mut self, result: BufferAccessResult) { match self.inner.take() { Some(BufferMapCallbackInner::Rust { callback }) => { - callback(status); + callback(result); } // SAFETY: the contract of the call to from_c says that this unsafe is sound. 
Some(BufferMapCallbackInner::C { inner }) => unsafe { + let status = match result { + Ok(()) => BufferMapAsyncStatus::Success, + Err(BufferAccessError::Device(_)) => BufferMapAsyncStatus::ContextLost, + Err(BufferAccessError::Invalid) | Err(BufferAccessError::Destroyed) => { + BufferMapAsyncStatus::Invalid + } + Err(BufferAccessError::AlreadyMapped) => BufferMapAsyncStatus::AlreadyMapped, + Err(BufferAccessError::MapAlreadyPending) => { + BufferMapAsyncStatus::MapAlreadyPending + } + Err(BufferAccessError::MissingBufferUsage(_)) => { + BufferMapAsyncStatus::InvalidUsageFlags + } + Err(BufferAccessError::UnalignedRange) + | Err(BufferAccessError::UnalignedRangeSize { .. }) + | Err(BufferAccessError::UnalignedOffset { .. }) => { + BufferMapAsyncStatus::InvalidAlignment + } + Err(BufferAccessError::OutOfBoundsUnderrun { .. }) + | Err(BufferAccessError::OutOfBoundsOverrun { .. }) + | Err(BufferAccessError::NegativeRange { .. }) => { + BufferMapAsyncStatus::InvalidRange + } + Err(_) => BufferMapAsyncStatus::Error, + }; + (inner.callback)(status, inner.user_data); }, None => { @@ -141,6 +170,8 @@ pub struct BufferMapOperation { pub enum BufferAccessError { #[error(transparent)] Device(#[from] DeviceError), + #[error("buffer map failed")] + Failed, #[error("buffer is invalid")] Invalid, #[error("buffer is destroyed")] @@ -178,8 +209,11 @@ pub enum BufferAccessError { start: wgt::BufferAddress, end: wgt::BufferAddress, }, + #[error("buffer map aborted")] + MapAborted, } +pub type BufferAccessResult = Result<(), BufferAccessError>; pub(crate) struct BufferPendingMapping { pub range: Range, pub op: BufferMapOperation, @@ -208,12 +242,14 @@ pub enum CreateBufferError { AccessError(#[from] BufferAccessError), #[error("buffers that are mapped at creation have to be aligned to `COPY_BUFFER_ALIGNMENT`")] UnalignedSize, - #[error("Buffers cannot have empty usage flags")] - EmptyUsage, + #[error("Invalid usage flags {0:?}")] + InvalidUsage(wgt::BufferUsages), #[error("`MAP` usage 
can only be combined with the opposite `COPY`, requested {0:?}")] UsageMismatch(wgt::BufferUsages), #[error("Buffer size {requested} is greater than the maximum buffer size ({maximum})")] MaxBufferSize { requested: u64, maximum: u64 }, + #[error(transparent)] + MissingDownlevelFlags(#[from] MissingDownlevelFlags), } impl Resource for Buffer { @@ -351,7 +387,7 @@ impl Global { let mut token = Token::root(); let (guard, _) = hub.textures.read(&mut token); let texture = guard.try_get(id).ok().flatten(); - let hal_texture = texture.map(|tex| tex.inner.as_raw().unwrap()); + let hal_texture = texture.and_then(|tex| tex.inner.as_raw()); hal_texture_callback(hal_texture); } @@ -394,6 +430,25 @@ impl Global { hal_device_callback(hal_device) } + + /// # Safety + /// - The raw surface handle must not be manually destroyed + pub unsafe fn surface_as_hal_mut) -> R, R>( + &self, + id: SurfaceId, + hal_surface_callback: F, + ) -> R { + profiling::scope!("Surface::as_hal_mut"); + + let mut token = Token::root(); + let (mut guard, _) = self.surfaces.write(&mut token); + let surface = guard.get_mut(id).ok(); + let hal_surface = surface + .and_then(|surface| A::get_surface_mut(surface)) + .map(|surface| &mut surface.raw); + + hal_surface_callback(hal_surface) + } } #[derive(Clone, Copy, Debug)] @@ -435,8 +490,8 @@ pub enum TextureDimensionError { pub enum CreateTextureError { #[error(transparent)] Device(#[from] DeviceError), - #[error("Textures cannot have empty usage flags")] - EmptyUsage, + #[error("Invalid usage flags {0:?}")] + InvalidUsage(wgt::TextureUsages), #[error(transparent)] InvalidDimension(#[from] TextureDimensionError), #[error("Depth texture ({1:?}) can't be created as {0:?}")] @@ -462,6 +517,8 @@ pub enum CreateTextureError { MultisampledNotRenderAttachment, #[error("Texture format {0:?} can't be used due to missing features.")] MissingFeatures(wgt::TextureFormat, #[source] MissingFeatures), + #[error("Sample count {0} is not supported by format {1:?} on this 
device. It may be supported by your adapter through the TEXTURE_ADAPTER_SPECIFIC_FORMAT_FEATURES feature.")] + InvalidSampleCount(u32, wgt::TextureFormat), } impl Resource for Texture { @@ -483,13 +540,20 @@ impl Borrow for Texture { #[cfg_attr(feature = "trace", derive(serde::Serialize))] #[cfg_attr(feature = "replay", derive(serde::Deserialize), serde(default))] pub struct TextureViewDescriptor<'a> { - /// Debug label of the texture view. This will show up in graphics debuggers for easy identification. + /// Debug label of the texture view. + /// + /// This will show up in graphics debuggers for easy identification. pub label: Label<'a>, - /// Format of the texture view, or `None` for the same format as the texture itself. + /// Format of the texture view, or `None` for the same format as the texture + /// itself. + /// /// At this time, it must be the same the underlying format of the texture. pub format: Option, - /// The dimension of the texture view. For 1D textures, this must be `1D`. For 2D textures it must be one of - /// `D2`, `D2Array`, `Cube`, and `CubeArray`. For 3D textures it must be `3D` + /// The dimension of the texture view. + /// + /// - For 1D textures, this must be `1D`. + /// - For 2D textures it must be one of `D2`, `D2Array`, `Cube`, or `CubeArray`. + /// - For 3D textures it must be `3D`. pub dimension: Option, /// Range within the texture that is accessible via this view. pub range: wgt::ImageSubresourceRange, @@ -580,7 +644,9 @@ impl Resource for TextureView { #[cfg_attr(feature = "trace", derive(serde::Serialize))] #[cfg_attr(feature = "replay", derive(serde::Deserialize))] pub struct SamplerDescriptor<'a> { - /// Debug label of the sampler. This will show up in graphics debuggers for easy identification. + /// Debug label of the sampler. + /// + /// This will show up in graphics debuggers for easy identification. pub label: Label<'a>, /// How to deal with out of bounds accesses in the u (i.e. 
x) direction pub address_modes: [wgt::AddressMode; 3], @@ -598,7 +664,8 @@ pub struct SamplerDescriptor<'a> { pub compare: Option, /// Valid values: 1, 2, 4, 8, and 16. pub anisotropy_clamp: Option, - /// Border color to use when address_mode is [`AddressMode::ClampToBorder`](wgt::AddressMode::ClampToBorder) + /// Border color to use when address_mode is + /// [`AddressMode::ClampToBorder`](wgt::AddressMode::ClampToBorder) pub border_color: Option, } diff --git a/wgpu-core/src/track/buffer.rs b/wgpu-core/src/track/buffer.rs index c67138eebe..65f02c467d 100644 --- a/wgpu-core/src/track/buffer.rs +++ b/wgpu-core/src/track/buffer.rs @@ -13,8 +13,8 @@ use crate::{ id::{BufferId, TypedId, Valid}, resource::Buffer, track::{ - invalid_resource_state, iterate_bitvec_indices, skip_barrier, ResourceMetadata, - ResourceMetadataProvider, ResourceUses, UsageConflict, + invalid_resource_state, skip_barrier, ResourceMetadata, ResourceMetadataProvider, + ResourceUses, UsageConflict, }, LifeGuard, RefCount, }; @@ -124,7 +124,7 @@ impl BufferUsageScope { /// Returns a list of all buffers tracked. pub fn used(&self) -> impl Iterator> + '_ { - self.metadata.used() + self.metadata.owned_ids() } /// Merge the list of buffer states in the given bind group into this usage scope. @@ -147,19 +147,21 @@ impl BufferUsageScope { let (index32, epoch, _) = id.0.unzip(); let index = index32 as usize; - insert_or_merge( - None, - None, - &mut self.state, - &mut self.metadata, - index32, - index, - BufferStateProvider::Direct { state }, - ResourceMetadataProvider::Direct { - epoch, - ref_count: Cow::Borrowed(ref_count), - }, - )?; + unsafe { + insert_or_merge( + None, + None, + &mut self.state, + &mut self.metadata, + index32, + index, + BufferStateProvider::Direct { state }, + ResourceMetadataProvider::Direct { + epoch, + ref_count: Cow::Borrowed(ref_count), + }, + )? 
+ }; } Ok(()) @@ -178,7 +180,7 @@ impl BufferUsageScope { self.set_size(incoming_size); } - for index in iterate_bitvec_indices(&scope.metadata.owned) { + for index in scope.metadata.owned_indices() { self.tracker_assert_in_bounds(index); scope.tracker_assert_in_bounds(index); @@ -291,7 +293,7 @@ impl BufferTracker { /// Returns a list of all buffers tracked. pub fn used(&self) -> impl Iterator> + '_ { - self.metadata.used() + self.metadata.owned_ids() } /// Drains all currently pending transitions. @@ -314,7 +316,7 @@ impl BufferTracker { self.tracker_assert_in_bounds(index); unsafe { - let currently_owned = self.metadata.owned.get(index).unwrap_unchecked(); + let currently_owned = self.metadata.contains_unchecked(index); if currently_owned { panic!("Tried to insert buffer already tracked"); @@ -392,7 +394,7 @@ impl BufferTracker { self.set_size(incoming_size); } - for index in iterate_bitvec_indices(&tracker.metadata.owned) { + for index in tracker.metadata.owned_indices() { self.tracker_assert_in_bounds(index); tracker.tracker_assert_in_bounds(index); unsafe { @@ -432,7 +434,7 @@ impl BufferTracker { self.set_size(incoming_size); } - for index in iterate_bitvec_indices(&scope.metadata.owned) { + for index in scope.metadata.owned_indices() { self.tracker_assert_in_bounds(index); scope.tracker_assert_in_bounds(index); unsafe { @@ -490,27 +492,29 @@ impl BufferTracker { scope.tracker_assert_in_bounds(index); - if !scope.metadata.owned.get(index).unwrap_unchecked() { + if unsafe { !scope.metadata.contains_unchecked(index) } { continue; } - insert_or_barrier_update( - None, - Some(&mut self.start), - &mut self.end, - &mut self.metadata, - index as u32, - index, - BufferStateProvider::Indirect { - state: &scope.state, - }, - None, - ResourceMetadataProvider::Indirect { - metadata: &scope.metadata, - }, - &mut self.temp, - ); + unsafe { + insert_or_barrier_update( + None, + Some(&mut self.start), + &mut self.end, + &mut self.metadata, + index as u32, + index, + 
BufferStateProvider::Indirect { + state: &scope.state, + }, + None, + ResourceMetadataProvider::Indirect { + metadata: &scope.metadata, + }, + &mut self.temp, + ) + }; - scope.metadata.reset(index); + unsafe { scope.metadata.remove(index) }; } } @@ -525,22 +529,19 @@ impl BufferTracker { let (index32, epoch, _) = id.0.unzip(); let index = index32 as usize; - if index > self.metadata.owned.len() { + if index > self.metadata.size() { return false; } self.tracker_assert_in_bounds(index); unsafe { - if self.metadata.owned.get(index).unwrap_unchecked() { - let existing_epoch = self.metadata.epochs.get_unchecked_mut(index); - let existing_ref_count = self.metadata.ref_counts.get_unchecked_mut(index); - - if *existing_epoch == epoch - && existing_ref_count.as_mut().unwrap_unchecked().load() == 1 - { - self.metadata.reset(index); + if self.metadata.contains_unchecked(index) { + let existing_epoch = self.metadata.get_epoch_unchecked(index); + let existing_ref_count = self.metadata.get_ref_count_unchecked(index); + if existing_epoch == epoch && existing_ref_count.load() == 1 { + self.metadata.remove(index); return true; } } @@ -570,7 +571,7 @@ impl BufferStateProvider<'_> { BufferStateProvider::Direct { state } => state, BufferStateProvider::Indirect { state } => { strict_assert!(index < state.len()); - *state.get_unchecked(index) + *unsafe { state.get_unchecked(index) } } } } @@ -596,29 +597,33 @@ unsafe fn insert_or_merge( state_provider: BufferStateProvider<'_>, metadata_provider: ResourceMetadataProvider<'_, A>, ) -> Result<(), UsageConflict> { - let currently_owned = resource_metadata.owned.get(index).unwrap_unchecked(); + let currently_owned = unsafe { resource_metadata.contains_unchecked(index) }; if !currently_owned { - insert( - life_guard, - start_states, + unsafe { + insert( + life_guard, + start_states, + current_states, + resource_metadata, + index, + state_provider, + None, + metadata_provider, + ) + }; + return Ok(()); + } + + unsafe { + merge( current_states, 
- resource_metadata, + index32, index, state_provider, - None, metadata_provider, - ); - return Ok(()); + ) } - - merge( - current_states, - index32, - index, - state_provider, - metadata_provider, - ) } /// If the resource isn't tracked @@ -651,32 +656,36 @@ unsafe fn insert_or_barrier_update( metadata_provider: ResourceMetadataProvider<'_, A>, barriers: &mut Vec>, ) { - let currently_owned = resource_metadata.owned.get(index).unwrap_unchecked(); + let currently_owned = unsafe { resource_metadata.contains_unchecked(index) }; if !currently_owned { - insert( - life_guard, - start_states, - current_states, - resource_metadata, - index, - start_state_provider, - end_state_provider, - metadata_provider, - ); + unsafe { + insert( + life_guard, + start_states, + current_states, + resource_metadata, + index, + start_state_provider, + end_state_provider, + metadata_provider, + ) + }; return; } let update_state_provider = end_state_provider.unwrap_or_else(|| start_state_provider.clone()); - barrier( - current_states, - index32, - index, - start_state_provider, - barriers, - ); - - update(current_states, index, update_state_provider); + unsafe { + barrier( + current_states, + index32, + index, + start_state_provider, + barriers, + ) + }; + + unsafe { update(current_states, index, update_state_provider) }; } #[inline(always)] @@ -690,8 +699,9 @@ unsafe fn insert( end_state_provider: Option>, metadata_provider: ResourceMetadataProvider<'_, A>, ) { - let new_start_state = start_state_provider.get_state(index); - let new_end_state = end_state_provider.map_or(new_start_state, |p| p.get_state(index)); + let new_start_state = unsafe { start_state_provider.get_state(index) }; + let new_end_state = + end_state_provider.map_or(new_start_state, |p| unsafe { p.get_state(index) }); // This should only ever happen with a wgpu bug, but let's just double // check that resource states don't have any conflicts. 
@@ -700,16 +710,15 @@ unsafe fn insert( log::trace!("\tbuf {index}: insert {new_start_state:?}..{new_end_state:?}"); - if let Some(&mut ref mut start_state) = start_states { - *start_state.get_unchecked_mut(index) = new_start_state; - } - *current_states.get_unchecked_mut(index) = new_end_state; - - let (epoch, ref_count) = metadata_provider.get_own(life_guard, index); + unsafe { + if let Some(&mut ref mut start_state) = start_states { + *start_state.get_unchecked_mut(index) = new_start_state; + } + *current_states.get_unchecked_mut(index) = new_end_state; - resource_metadata.owned.set(index, true); - *resource_metadata.epochs.get_unchecked_mut(index) = epoch; - *resource_metadata.ref_counts.get_unchecked_mut(index) = Some(ref_count); + let (epoch, ref_count) = metadata_provider.get_own(life_guard, index); + resource_metadata.insert(index, epoch, ref_count); + } } #[inline(always)] @@ -720,14 +729,18 @@ unsafe fn merge( state_provider: BufferStateProvider<'_>, metadata_provider: ResourceMetadataProvider<'_, A>, ) -> Result<(), UsageConflict> { - let current_state = current_states.get_unchecked_mut(index); - let new_state = state_provider.get_state(index); + let current_state = unsafe { current_states.get_unchecked_mut(index) }; + let new_state = unsafe { state_provider.get_state(index) }; let merged_state = *current_state | new_state; if invalid_resource_state(merged_state) { return Err(UsageConflict::from_buffer( - BufferId::zip(index32, metadata_provider.get_epoch(index), A::VARIANT), + BufferId::zip( + index32, + unsafe { metadata_provider.get_epoch(index) }, + A::VARIANT, + ), *current_state, new_state, )); @@ -748,8 +761,8 @@ unsafe fn barrier( state_provider: BufferStateProvider<'_>, barriers: &mut Vec>, ) { - let current_state = *current_states.get_unchecked(index); - let new_state = state_provider.get_state(index); + let current_state = unsafe { *current_states.get_unchecked(index) }; + let new_state = unsafe { state_provider.get_state(index) }; if 
skip_barrier(current_state, new_state) { return; @@ -770,8 +783,8 @@ unsafe fn update( index: usize, state_provider: BufferStateProvider<'_>, ) { - let current_state = current_states.get_unchecked_mut(index); - let new_state = state_provider.get_state(index); + let current_state = unsafe { current_states.get_unchecked_mut(index) }; + let new_state = unsafe { state_provider.get_state(index) }; *current_state = new_state; } diff --git a/wgpu-core/src/track/metadata.rs b/wgpu-core/src/track/metadata.rs new file mode 100644 index 0000000000..728ff5ca0e --- /dev/null +++ b/wgpu-core/src/track/metadata.rs @@ -0,0 +1,268 @@ +//! The `ResourceMetadata` type. + +use crate::{ + hub, + id::{self, TypedId}, + Epoch, LifeGuard, RefCount, +}; +use bit_vec::BitVec; +use std::{borrow::Cow, marker::PhantomData, mem}; + +/// A set of resources, holding a [`RefCount`] and epoch for each member. +/// +/// Testing for membership is fast, and iterating over members is +/// reasonably fast in practice. Storage consumption is proportional +/// to the largest id index of any member, not to the number of +/// members, but a bit vector tracks occupancy, so iteration touches +/// only occupied elements. +#[derive(Debug)] +pub(super) struct ResourceMetadata { + /// If the resource with index `i` is a member, `owned[i]` is `true`. + owned: BitVec, + + /// A vector parallel to `owned`, holding clones of members' `RefCount`s. + ref_counts: Vec>, + + /// A vector parallel to `owned`, holding the epoch of each members' id. + epochs: Vec, + + /// This tells Rust that this type should be covariant with `A`. + _phantom: PhantomData, +} + +impl ResourceMetadata { + pub(super) fn new() -> Self { + Self { + owned: BitVec::default(), + ref_counts: Vec::new(), + epochs: Vec::new(), + + _phantom: PhantomData, + } + } + + /// Returns the number of indices we can accommodate. 
+ pub(super) fn size(&self) -> usize { + self.owned.len() + } + + pub(super) fn set_size(&mut self, size: usize) { + self.ref_counts.resize(size, None); + self.epochs.resize(size, u32::MAX); + + resize_bitvec(&mut self.owned, size); + } + + /// Ensures a given index is in bounds for all arrays and does + /// sanity checks of the presence of a refcount. + /// + /// In release mode this function is completely empty and is removed. + #[cfg_attr(not(feature = "strict_asserts"), allow(unused_variables))] + pub(super) fn tracker_assert_in_bounds(&self, index: usize) { + strict_assert!(index < self.owned.len()); + strict_assert!(index < self.ref_counts.len()); + strict_assert!(index < self.epochs.len()); + + strict_assert!(if self.contains(index) { + self.ref_counts[index].is_some() + } else { + true + }); + } + + /// Returns true if the tracker owns no resources. + /// + /// This is a O(n) operation. + pub(super) fn is_empty(&self) -> bool { + !self.owned.any() + } + + /// Returns true if the set contains the resource with the given index. + pub(super) fn contains(&self, index: usize) -> bool { + self.owned[index] + } + + /// Returns true if the set contains the resource with the given index. + /// + /// # Safety + /// + /// The given `index` must be in bounds for this `ResourceMetadata`'s + /// existing tables. See `tracker_assert_in_bounds`. + #[inline(always)] + pub(super) unsafe fn contains_unchecked(&self, index: usize) -> bool { + unsafe { self.owned.get(index).unwrap_unchecked() } + } + + /// Insert a resource into the set. + /// + /// Add the resource with the given index, epoch, and reference count to the + /// set. + /// + /// # Safety + /// + /// The given `index` must be in bounds for this `ResourceMetadata`'s + /// existing tables. See `tracker_assert_in_bounds`. 
+ #[inline(always)] + pub(super) unsafe fn insert(&mut self, index: usize, epoch: Epoch, ref_count: RefCount) { + self.owned.set(index, true); + unsafe { + *self.epochs.get_unchecked_mut(index) = epoch; + *self.ref_counts.get_unchecked_mut(index) = Some(ref_count); + } + } + + /// Get the [`RefCount`] of the resource with the given index. + /// + /// # Safety + /// + /// The given `index` must be in bounds for this `ResourceMetadata`'s + /// existing tables. See `tracker_assert_in_bounds`. + #[inline(always)] + pub(super) unsafe fn get_ref_count_unchecked(&self, index: usize) -> &RefCount { + unsafe { + self.ref_counts + .get_unchecked(index) + .as_ref() + .unwrap_unchecked() + } + } + + /// Get the [`Epoch`] of the id of the resource with the given index. + /// + /// # Safety + /// + /// The given `index` must be in bounds for this `ResourceMetadata`'s + /// existing tables. See `tracker_assert_in_bounds`. + #[inline(always)] + pub(super) unsafe fn get_epoch_unchecked(&self, index: usize) -> Epoch { + unsafe { *self.epochs.get_unchecked(index) } + } + + /// Returns an iterator over the ids for all resources owned by `self`. + pub(super) fn owned_ids(&self) -> impl Iterator> + '_ { + if !self.owned.is_empty() { + self.tracker_assert_in_bounds(self.owned.len() - 1) + }; + iterate_bitvec_indices(&self.owned).map(move |index| { + let epoch = unsafe { *self.epochs.get_unchecked(index) }; + id::Valid(Id::zip(index as u32, epoch, A::VARIANT)) + }) + } + + /// Returns an iterator over the indices of all resources owned by `self`. + pub(super) fn owned_indices(&self) -> impl Iterator + '_ { + if !self.owned.is_empty() { + self.tracker_assert_in_bounds(self.owned.len() - 1) + }; + iterate_bitvec_indices(&self.owned) + } + + /// Remove the resource with the given index from the set. 
+ pub(super) unsafe fn remove(&mut self, index: usize) { + unsafe { + *self.ref_counts.get_unchecked_mut(index) = None; + *self.epochs.get_unchecked_mut(index) = u32::MAX; + } + self.owned.set(index, false); + } +} + +/// A source of resource metadata. +/// +/// This is used to abstract over the various places +/// trackers can get new resource metadata from. +pub(super) enum ResourceMetadataProvider<'a, A: hub::HalApi> { + /// Comes directly from explicit values. + Direct { + epoch: Epoch, + ref_count: Cow<'a, RefCount>, + }, + /// Comes from another metadata tracker. + Indirect { metadata: &'a ResourceMetadata }, + /// The epoch is given directly, but the life count comes from the resource itself. + Resource { epoch: Epoch }, +} +impl ResourceMetadataProvider<'_, A> { + /// Get the epoch and an owned refcount from this. + /// + /// # Safety + /// + /// - The index must be in bounds of the metadata tracker if this uses an indirect source. + /// - life_guard must be Some if this uses a Resource source. + #[inline(always)] + pub(super) unsafe fn get_own( + self, + life_guard: Option<&LifeGuard>, + index: usize, + ) -> (Epoch, RefCount) { + match self { + ResourceMetadataProvider::Direct { epoch, ref_count } => { + (epoch, ref_count.into_owned()) + } + ResourceMetadataProvider::Indirect { metadata } => { + metadata.tracker_assert_in_bounds(index); + (unsafe { *metadata.epochs.get_unchecked(index) }, { + let ref_count = unsafe { metadata.ref_counts.get_unchecked(index) }; + unsafe { ref_count.clone().unwrap_unchecked() } + }) + } + ResourceMetadataProvider::Resource { epoch } => { + strict_assert!(life_guard.is_some()); + (epoch, unsafe { life_guard.unwrap_unchecked() }.add_ref()) + } + } + } + /// Get the epoch from this. + /// + /// # Safety + /// + /// - The index must be in bounds of the metadata tracker if this uses an indirect source. 
+ #[inline(always)] + pub(super) unsafe fn get_epoch(self, index: usize) -> Epoch { + match self { + ResourceMetadataProvider::Direct { epoch, .. } + | ResourceMetadataProvider::Resource { epoch, .. } => epoch, + ResourceMetadataProvider::Indirect { metadata } => { + metadata.tracker_assert_in_bounds(index); + unsafe { *metadata.epochs.get_unchecked(index) } + } + } + } +} + +/// Resizes the given bitvec to the given size. I'm not sure why this is hard to do but it is. +fn resize_bitvec(vec: &mut BitVec, size: usize) { + let owned_size_to_grow = size.checked_sub(vec.len()); + if let Some(delta) = owned_size_to_grow { + if delta != 0 { + vec.grow(delta, false); + } + } else { + vec.truncate(size); + } +} + +/// Produces an iterator that yields the indexes of all bits that are set in the bitvec. +/// +/// Will skip entire usize's worth of bits if they are all false. +fn iterate_bitvec_indices(ownership: &BitVec) -> impl Iterator + '_ { + const BITS_PER_BLOCK: usize = mem::size_of::() * 8; + + let size = ownership.len(); + + ownership + .blocks() + .enumerate() + .filter(|&(_, word)| word != 0) + .flat_map(move |(word_index, mut word)| { + let bit_start = word_index * BITS_PER_BLOCK; + let bit_end = (bit_start + BITS_PER_BLOCK).min(size); + + (bit_start..bit_end).filter(move |_| { + let active = word & 0b1 != 0; + word >>= 1; + + active + }) + }) +} diff --git a/wgpu-core/src/track/mod.rs b/wgpu-core/src/track/mod.rs index 830244bee4..9d85e1ab7b 100644 --- a/wgpu-core/src/track/mod.rs +++ b/wgpu-core/src/track/mod.rs @@ -1,95 +1,100 @@ /*! Resource State and Lifetime Trackers - * - * These structures are responsible for keeping track of resource state, - * generating barriers where needed, and making sure resources are kept - * alive until the trackers die. - * - * ## General Architecture - * - * Tracking is some of the hottest code in the entire codebase, so the trackers - * are designed to be as cache efficient as possible. 
They store resource state - * in flat vectors, storing metadata SOA style, one vector per type of metadata. - * - * A lot of the tracker code is deeply unsafe, using unchecked accesses all over - * to make performance as good as possible. However, for all unsafe accesses, there - * is a corresponding debug assert the checks if that access is valid. This helps - * get bugs caught fast, while still letting users not need to pay for the bounds - * checks. - * - * In wgpu, resource IDs are allocated and re-used, so will always be as low - * as reasonably possible. This allows us to use the ID as an index into an array. - * - * ## Statefulness - * - * There are two main types of trackers, stateful and stateless. - * - * Stateful trackers are for buffers and textures. They both have - * resource state attached to them which needs to be used to generate - * automatic synchronization. Because of the different requirements of - * buffers and textures, they have two separate tracking structures. - * - * Stateless trackers only store metadata and own the given resource. - * - * ## Use Case - * - * Within each type of tracker, the trackers are further split into 3 different - * use cases, Bind Group, Usage Scope, and a full Tracker. - * - * Bind Group trackers are just a list of different resources, their refcount, - * and how they are used. Textures are used via a selector and a usage type. - * Buffers by just a usage type. Stateless resources don't have a usage type. - * - * Usage Scope trackers are only for stateful resources. These trackers represent - * a single [`UsageScope`] in the spec. When a use is added to a usage scope, - * it is merged with all other uses of that resource in that scope. If there - * is a usage conflict, merging will fail and an error will be reported. - * - * Full trackers represent a before and after state of a resource. These - * are used for tracking on the device and on command buffers. 
The before - * state represents the state the resource is first used as in the command buffer, - * the after state is the state the command buffer leaves the resource in. - * These double ended buffers can then be used to generate the needed transitions - * between command buffers. - * - * ## Dense Datastructure with Sparse Data - * - * This tracking system is based on having completely dense data, but trackers do - * not always contain every resource. Some resources (or even most resources) go - * unused in any given command buffer. So to help speed up the process of iterating - * through possibly thousands of resources, we use a bit vector to represent if - * a resource is in the buffer or not. This allows us extremely efficient memory - * utilization, as well as being able to bail out of whole blocks of 32-64 resources - * with a single usize comparison with zero. In practice this means that merging - * partially resident buffers is extremely quick. - * - * The main advantage of this dense datastructure is that we can do merging - * of trackers in an extremely efficient fashion that results in us doing linear - * scans down a couple of buffers. CPUs and their caches absolutely eat this up. - * - * ## Stateful Resource Operations - * - * All operations on stateful trackers boil down to one of four operations: - * - `insert(tracker, new_state)` adds a resource with a given state to the tracker - * for the first time. - * - `merge(tracker, new_state)` merges this new state with the previous state, checking - * for usage conflicts. - * - `barrier(tracker, new_state)` compares the given state to the existing state and - * generates the needed barriers. - * - `update(tracker, new_state)` takes the given new state and overrides the old state. - * - * This allows us to compose the operations to form the various kinds of tracker merges - * that need to happen in the codebase. 
For each resource in the given merger, the following - * operation applies: - * - * UsageScope <- Resource = insert(scope, usage) OR merge(scope, usage) - * UsageScope <- UsageScope = insert(scope, scope) OR merge(scope, scope) - * CommandBuffer <- UsageScope = insert(buffer.start, buffer.end, scope) OR barrier(buffer.end, scope) + update(buffer.end, scope) - * Deivce <- CommandBuffer = insert(device.start, device.end, buffer.start, buffer.end) OR barrier(device.end, buffer.start) + update(device.end, buffer.end) - * - * [`UsageScope`]: https://gpuweb.github.io/gpuweb/#programming-model-synchronization -!*/ + +These structures are responsible for keeping track of resource state, +generating barriers where needed, and making sure resources are kept +alive until the trackers die. + +## General Architecture + +Tracking is some of the hottest code in the entire codebase, so the trackers +are designed to be as cache efficient as possible. They store resource state +in flat vectors, storing metadata SOA style, one vector per type of metadata. + +A lot of the tracker code is deeply unsafe, using unchecked accesses all over +to make performance as good as possible. However, for all unsafe accesses, there +is a corresponding debug assert that checks if that access is valid. This helps +get bugs caught fast, while still letting users not need to pay for the bounds +checks. + +In wgpu, resource IDs are allocated and re-used, so will always be as low +as reasonably possible. This allows us to use the ID as an index into an array. + +## Statefulness + +There are two main types of trackers, stateful and stateless. + +Stateful trackers are for buffers and textures. They both have +resource state attached to them which needs to be used to generate +automatic synchronization. Because of the different requirements of +buffers and textures, they have two separate tracking structures. + +Stateless trackers only store metadata and own the given resource.
+ +## Use Case + +Within each type of tracker, the trackers are further split into 3 different +use cases, Bind Group, Usage Scope, and a full Tracker. + +Bind Group trackers are just a list of different resources, their refcount, +and how they are used. Textures are used via a selector and a usage type. +Buffers by just a usage type. Stateless resources don't have a usage type. + +Usage Scope trackers are only for stateful resources. These trackers represent +a single [`UsageScope`] in the spec. When a use is added to a usage scope, +it is merged with all other uses of that resource in that scope. If there +is a usage conflict, merging will fail and an error will be reported. + +Full trackers represent a before and after state of a resource. These +are used for tracking on the device and on command buffers. The before +state represents the state the resource is first used as in the command buffer, +the after state is the state the command buffer leaves the resource in. +These double ended buffers can then be used to generate the needed transitions +between command buffers. + +## Dense Datastructure with Sparse Data + +This tracking system is based on having completely dense data, but trackers do +not always contain every resource. Some resources (or even most resources) go +unused in any given command buffer. So to help speed up the process of iterating +through possibly thousands of resources, we use a bit vector to represent if +a resource is in the buffer or not. This allows us extremely efficient memory +utilization, as well as being able to bail out of whole blocks of 32-64 resources +with a single usize comparison with zero. In practice this means that merging +partially resident buffers is extremely quick. + +The main advantage of this dense datastructure is that we can do merging +of trackers in an extremely efficient fashion that results in us doing linear +scans down a couple of buffers. CPUs and their caches absolutely eat this up. 
+ +## Stateful Resource Operations + +All operations on stateful trackers boil down to one of four operations: +- `insert(tracker, new_state)` adds a resource with a given state to the tracker + for the first time. +- `merge(tracker, new_state)` merges this new state with the previous state, checking + for usage conflicts. +- `barrier(tracker, new_state)` compares the given state to the existing state and + generates the needed barriers. +- `update(tracker, new_state)` takes the given new state and overrides the old state. + +This allows us to compose the operations to form the various kinds of tracker merges +that need to happen in the codebase. For each resource in the given merger, the following +operation applies: + +``` +UsageScope <- Resource = insert(scope, usage) OR merge(scope, usage) +UsageScope <- UsageScope = insert(scope, scope) OR merge(scope, scope) +CommandBuffer <- UsageScope = insert(buffer.start, buffer.end, scope) + OR barrier(buffer.end, scope) + update(buffer.end, scope) +Device <- CommandBuffer = insert(device.start, device.end, buffer.start, buffer.end) + OR barrier(device.end, buffer.start) + update(device.end, buffer.end) +``` + +[`UsageScope`]: https://gpuweb.github.io/gpuweb/#programming-model-synchronization +*/ mod buffer; +mod metadata; mod range; mod stateless; mod texture; @@ -97,14 +102,14 @@ mod texture; use crate::{ binding_model, command, conv, hub, id::{self, TypedId}, - pipeline, resource, Epoch, LifeGuard, RefCount, + pipeline, resource, }; -use bit_vec::BitVec; -use std::{borrow::Cow, fmt, marker::PhantomData, mem, num::NonZeroU32, ops}; +use std::{fmt, num::NonZeroU32, ops}; use thiserror::Error; pub(crate) use buffer::{BufferBindGroupState, BufferTracker, BufferUsageScope}; +use metadata::{ResourceMetadata, ResourceMetadataProvider}; pub(crate) use stateless::{StatelessBindGroupSate, StatelessTracker}; pub(crate) use texture::{ TextureBindGroupState, TextureSelector, TextureTracker, TextureUsageScope, @@ -201,43 +206,6 @@ 
fn skip_barrier(old_state: T, new_state: T) -> bool { old_state == new_state && old_state.all_ordered() } -/// Resizes the given bitvec to the given size. I'm not sure why this is hard to do but it is. -fn resize_bitvec(vec: &mut BitVec, size: usize) { - let owned_size_to_grow = size.checked_sub(vec.len()); - if let Some(delta) = owned_size_to_grow { - if delta != 0 { - vec.grow(delta, false); - } - } else { - vec.truncate(size); - } -} - -/// Produces an iterator that yields the indexes of all bits that are set in the bitvec. -/// -/// Will skip entire usize's worth of bits if they are all false. -fn iterate_bitvec_indices(ownership: &BitVec) -> impl Iterator + '_ { - const BITS_PER_BLOCK: usize = mem::size_of::() * 8; - - let size = ownership.len(); - - ownership - .blocks() - .enumerate() - .filter(|&(_, word)| word != 0) - .flat_map(move |(word_index, mut word)| { - let bit_start = word_index * BITS_PER_BLOCK; - let bit_end = (bit_start + BITS_PER_BLOCK).min(size); - - (bit_start..bit_end).filter(move |_| { - let active = word & 0b1 != 0; - word >>= 1; - - active - }) - }) -} - #[derive(Clone, Debug, Error, Eq, PartialEq)] pub enum UsageConflict { #[error("Attempted to use invalid buffer")] @@ -338,142 +306,6 @@ impl fmt::Display for InvalidUse { } } -/// SOA container for storing metadata of a resource. -/// -/// This contins the ownership bitvec, the refcount of -/// the resource, and the epoch of the object's full ID. 
-#[derive(Debug)] -pub(crate) struct ResourceMetadata { - owned: BitVec, - ref_counts: Vec>, - epochs: Vec, - - _phantom: PhantomData, -} -impl ResourceMetadata { - pub fn new() -> Self { - Self { - owned: BitVec::default(), - ref_counts: Vec::new(), - epochs: Vec::new(), - - _phantom: PhantomData, - } - } - - pub fn set_size(&mut self, size: usize) { - self.ref_counts.resize(size, None); - self.epochs.resize(size, u32::MAX); - - resize_bitvec(&mut self.owned, size); - } - - /// Ensures a given index is in bounds for all arrays and does - /// sanity checks of the presence of a refcount. - /// - /// In release mode this function is completely empty and is removed. - #[cfg_attr(not(feature = "strict_asserts"), allow(unused_variables))] - fn tracker_assert_in_bounds(&self, index: usize) { - strict_assert!(index < self.owned.len()); - strict_assert!(index < self.ref_counts.len()); - strict_assert!(index < self.epochs.len()); - - strict_assert!(if self.owned.get(index).unwrap() { - self.ref_counts[index].is_some() - } else { - true - }); - } - - /// Returns true if the tracker owns no resources. - /// - /// This is a O(n) operation. - fn is_empty(&self) -> bool { - !self.owned.any() - } - - /// Returns ids for all resources we own. - fn used(&self) -> impl Iterator> + '_ { - if !self.owned.is_empty() { - self.tracker_assert_in_bounds(self.owned.len() - 1) - }; - iterate_bitvec_indices(&self.owned).map(move |index| { - let epoch = unsafe { *self.epochs.get_unchecked(index) }; - id::Valid(Id::zip(index as u32, epoch, A::VARIANT)) - }) - } - - /// Resets the metadata for a given index to sane "invalid" values. - unsafe fn reset(&mut self, index: usize) { - *self.ref_counts.get_unchecked_mut(index) = None; - *self.epochs.get_unchecked_mut(index) = u32::MAX; - self.owned.set(index, false); - } -} - -/// A source of resource metadata. -/// -/// This is used to abstract over the various places -/// trackers can get new resource metadata from. 
-enum ResourceMetadataProvider<'a, A: hub::HalApi> { - /// Comes directly from explicit values. - Direct { - epoch: Epoch, - ref_count: Cow<'a, RefCount>, - }, - /// Comes from another metadata tracker. - Indirect { metadata: &'a ResourceMetadata }, - /// The epoch is given directly, but the life count comes from the resource itself. - Resource { epoch: Epoch }, -} -impl ResourceMetadataProvider<'_, A> { - /// Get the epoch and an owned refcount from this. - /// - /// # Safety - /// - /// - The index must be in bounds of the metadata tracker if this uses an indirect source. - /// - life_guard must be Some if this uses a Resource source. - #[inline(always)] - unsafe fn get_own(self, life_guard: Option<&LifeGuard>, index: usize) -> (Epoch, RefCount) { - match self { - ResourceMetadataProvider::Direct { epoch, ref_count } => { - (epoch, ref_count.into_owned()) - } - ResourceMetadataProvider::Indirect { metadata } => { - metadata.tracker_assert_in_bounds(index); - ( - *metadata.epochs.get_unchecked(index), - metadata - .ref_counts - .get_unchecked(index) - .clone() - .unwrap_unchecked(), - ) - } - ResourceMetadataProvider::Resource { epoch } => { - strict_assert!(life_guard.is_some()); - (epoch, life_guard.unwrap_unchecked().add_ref()) - } - } - } - /// Get the epoch from this. - /// - /// # Safety - /// - /// - The index must be in bounds of the metadata tracker if this uses an indirect source. - #[inline(always)] - unsafe fn get_epoch(self, index: usize) -> Epoch { - match self { - ResourceMetadataProvider::Direct { epoch, .. } - | ResourceMetadataProvider::Resource { epoch, .. } => epoch, - ResourceMetadataProvider::Indirect { metadata } => { - metadata.tracker_assert_in_bounds(index); - *metadata.epochs.get_unchecked(index) - } - } - } -} - /// All the usages that a bind group contains. The uses are not deduplicated in any way /// and may include conflicting uses. This is fully compliant by the WebGPU spec. 
/// @@ -560,9 +392,11 @@ impl RenderBundleScope { textures: &hub::Storage, id::TextureId>, bind_group: &BindGroupStates, ) -> Result<(), UsageConflict> { - self.buffers.merge_bind_group(&bind_group.buffers)?; - self.textures - .merge_bind_group(textures, &bind_group.textures)?; + unsafe { self.buffers.merge_bind_group(&bind_group.buffers)? }; + unsafe { + self.textures + .merge_bind_group(textures, &bind_group.textures)? + }; Ok(()) } @@ -607,9 +441,11 @@ impl UsageScope { textures: &hub::Storage, id::TextureId>, bind_group: &BindGroupStates, ) -> Result<(), UsageConflict> { - self.buffers.merge_bind_group(&bind_group.buffers)?; - self.textures - .merge_bind_group(textures, &bind_group.textures)?; + unsafe { + self.buffers.merge_bind_group(&bind_group.buffers)?; + self.textures + .merge_bind_group(textures, &bind_group.textures)?; + } Ok(()) } @@ -736,13 +572,19 @@ impl Tracker { scope: &mut UsageScope, bind_group: &BindGroupStates, ) { - self.buffers - .set_and_remove_from_usage_scope_sparse(&mut scope.buffers, bind_group.buffers.used()); - self.textures.set_and_remove_from_usage_scope_sparse( - textures, - &mut scope.textures, - &bind_group.textures, - ); + unsafe { + self.buffers.set_and_remove_from_usage_scope_sparse( + &mut scope.buffers, + bind_group.buffers.used(), + ) + }; + unsafe { + self.textures.set_and_remove_from_usage_scope_sparse( + textures, + &mut scope.textures, + &bind_group.textures, + ) + }; } /// Tracks the stateless resources from the given renderbundle. It is expected diff --git a/wgpu-core/src/track/stateless.rs b/wgpu-core/src/track/stateless.rs index 4267e829db..1d0fd5997a 100644 --- a/wgpu-core/src/track/stateless.rs +++ b/wgpu-core/src/track/stateless.rs @@ -9,7 +9,7 @@ use std::marker::PhantomData; use crate::{ hub, id::{TypedId, Valid}, - track::{iterate_bitvec_indices, ResourceMetadata}, + track::ResourceMetadata, RefCount, }; @@ -84,14 +84,14 @@ impl StatelessTracker { /// Extend the vectors to let the given index be valid. 
fn allow_index(&mut self, index: usize) { - if index >= self.metadata.owned.len() { + if index >= self.metadata.size() { self.set_size(index + 1); } } /// Returns a list of all resources tracked. pub fn used(&self) -> impl Iterator> + '_ { - self.metadata.used() + self.metadata.owned_ids() } /// Inserts a single resource into the resource tracker. @@ -109,9 +109,7 @@ impl StatelessTracker { self.tracker_assert_in_bounds(index); unsafe { - *self.metadata.epochs.get_unchecked_mut(index) = epoch; - *self.metadata.ref_counts.get_unchecked_mut(index) = Some(ref_count); - self.metadata.owned.set(index, true); + self.metadata.insert(index, epoch, ref_count); } } @@ -130,9 +128,8 @@ impl StatelessTracker { self.tracker_assert_in_bounds(index); unsafe { - *self.metadata.epochs.get_unchecked_mut(index) = epoch; - *self.metadata.ref_counts.get_unchecked_mut(index) = Some(item.life_guard().add_ref()); - self.metadata.owned.set(index, true); + self.metadata + .insert(index, epoch, item.life_guard().add_ref()); } Some(item) @@ -143,30 +140,21 @@ impl StatelessTracker { /// If the ID is higher than the length of internal vectors, /// the vectors will be extended. A call to set_size is not needed. 
pub fn add_from_tracker(&mut self, other: &Self) { - let incoming_size = other.metadata.owned.len(); - if incoming_size > self.metadata.owned.len() { + let incoming_size = other.metadata.size(); + if incoming_size > self.metadata.size() { self.set_size(incoming_size); } - for index in iterate_bitvec_indices(&other.metadata.owned) { + for index in other.metadata.owned_indices() { self.tracker_assert_in_bounds(index); other.tracker_assert_in_bounds(index); unsafe { - let previously_owned = self.metadata.owned.get(index).unwrap_unchecked(); + let previously_owned = self.metadata.contains_unchecked(index); if !previously_owned { - self.metadata.owned.set(index, true); - - let other_ref_count = other - .metadata - .ref_counts - .get_unchecked(index) - .clone() - .unwrap_unchecked(); - *self.metadata.ref_counts.get_unchecked_mut(index) = Some(other_ref_count); - - let epoch = *other.metadata.epochs.get_unchecked(index); - *self.metadata.epochs.get_unchecked_mut(index) = epoch; + let epoch = other.metadata.get_epoch_unchecked(index); + let other_ref_count = other.metadata.get_ref_count_unchecked(index); + self.metadata.insert(index, epoch, other_ref_count.clone()); } } } @@ -183,22 +171,19 @@ impl StatelessTracker { let (index32, epoch, _) = id.0.unzip(); let index = index32 as usize; - if index > self.metadata.owned.len() { + if index > self.metadata.size() { return false; } self.tracker_assert_in_bounds(index); unsafe { - if self.metadata.owned.get(index).unwrap_unchecked() { - let existing_epoch = self.metadata.epochs.get_unchecked_mut(index); - let existing_ref_count = self.metadata.ref_counts.get_unchecked_mut(index); - - if *existing_epoch == epoch - && existing_ref_count.as_mut().unwrap_unchecked().load() == 1 - { - self.metadata.reset(index); + if self.metadata.contains_unchecked(index) { + let existing_epoch = self.metadata.get_epoch_unchecked(index); + let existing_ref_count = self.metadata.get_ref_count_unchecked(index); + if existing_epoch == epoch && 
existing_ref_count.load() == 1 { + self.metadata.remove(index); return true; } } diff --git a/wgpu-core/src/track/texture.rs b/wgpu-core/src/track/texture.rs index 17f4e57964..bc0eb2875e 100644 --- a/wgpu-core/src/track/texture.rs +++ b/wgpu-core/src/track/texture.rs @@ -25,8 +25,8 @@ use crate::{ id::{TextureId, TypedId, Valid}, resource::Texture, track::{ - invalid_resource_state, iterate_bitvec_indices, skip_barrier, ResourceMetadata, - ResourceMetadataProvider, ResourceUses, UsageConflict, + invalid_resource_state, skip_barrier, ResourceMetadata, ResourceMetadataProvider, + ResourceUses, UsageConflict, }, LifeGuard, RefCount, }; @@ -114,7 +114,7 @@ impl ComplexTextureState { strict_assert_eq!(invalid_resource_state(desired_state), false); let mips = selector.mips.start as usize..selector.mips.end as usize; - for mip in complex.mips.get_unchecked_mut(mips) { + for mip in unsafe { complex.mips.get_unchecked_mut(mips) } { for &mut (_, ref mut state) in mip.isolate(&selector.layers, TextureUses::UNKNOWN) { *state = desired_state; } @@ -238,7 +238,7 @@ impl TextureUsageScope { strict_assert!(index < self.set.simple.len()); - strict_assert!(if self.metadata.owned.get(index).unwrap() + strict_assert!(if self.metadata.contains(index) && self.set.simple[index] == TextureUses::COMPLEX { self.set.complex.contains_key(&(index as u32)) @@ -258,7 +258,7 @@ impl TextureUsageScope { /// Returns a list of all textures tracked. pub fn used(&self) -> impl Iterator> + '_ { - self.metadata.used() + self.metadata.owned_ids() } /// Returns true if the tracker owns no resources. 
@@ -285,15 +285,16 @@ impl TextureUsageScope { self.set_size(incoming_size); } - for index in iterate_bitvec_indices(&scope.metadata.owned) { + for index in scope.metadata.owned_indices() { let index32 = index as u32; self.tracker_assert_in_bounds(index); scope.tracker_assert_in_bounds(index); + let texture_data = unsafe { texture_data_from_texture(storage, index32) }; unsafe { insert_or_merge( - texture_data_from_texture(storage, index32), + texture_data, &mut self.set, &mut self.metadata, index32, @@ -327,7 +328,7 @@ impl TextureUsageScope { bind_group: &TextureBindGroupState, ) -> Result<(), UsageConflict> { for &(id, ref selector, ref ref_count, state) in &bind_group.textures { - self.merge_single(storage, id, selector.clone(), ref_count, state)?; + unsafe { self.merge_single(storage, id, selector.clone(), ref_count, state)? }; } Ok(()) @@ -359,18 +360,21 @@ impl TextureUsageScope { self.tracker_assert_in_bounds(index); - insert_or_merge( - texture_data_from_texture(storage, index32), - &mut self.set, - &mut self.metadata, - index32, - index, - TextureStateProvider::from_option(selector, new_state), - ResourceMetadataProvider::Direct { - epoch, - ref_count: Cow::Borrowed(ref_count), - }, - )?; + let texture_data = unsafe { texture_data_from_texture(storage, index32) }; + unsafe { + insert_or_merge( + texture_data, + &mut self.set, + &mut self.metadata, + index32, + index, + TextureStateProvider::from_option(selector, new_state), + ResourceMetadataProvider::Direct { + epoch, + ref_count: Cow::Borrowed(ref_count), + }, + )? 
+ }; Ok(()) } @@ -407,14 +411,14 @@ impl TextureTracker { strict_assert!(index < self.start_set.simple.len()); strict_assert!(index < self.end_set.simple.len()); - strict_assert!(if self.metadata.owned.get(index).unwrap() + strict_assert!(if self.metadata.contains(index) && self.start_set.simple[index] == TextureUses::COMPLEX { self.start_set.complex.contains_key(&(index as u32)) } else { true }); - strict_assert!(if self.metadata.owned.get(index).unwrap() + strict_assert!(if self.metadata.contains(index) && self.end_set.simple[index] == TextureUses::COMPLEX { self.end_set.complex.contains_key(&(index as u32)) @@ -443,7 +447,7 @@ impl TextureTracker { /// Returns a list of all textures tracked. pub fn used(&self) -> impl Iterator> + '_ { - self.metadata.used() + self.metadata.owned_ids() } /// Drains all currently pending transitions. @@ -465,11 +469,7 @@ impl TextureTracker { self.tracker_assert_in_bounds(index); - self.metadata - .ref_counts - .get_unchecked(index) - .as_ref() - .unwrap_unchecked() + unsafe { self.metadata.get_ref_count_unchecked(index) } } /// Inserts a single texture and a state into the resource tracker. 
@@ -487,7 +487,7 @@ impl TextureTracker { self.tracker_assert_in_bounds(index); unsafe { - let currently_owned = self.metadata.owned.get(index).unwrap_unchecked(); + let currently_owned = self.metadata.contains_unchecked(index); if currently_owned { panic!("Tried to insert texture already tracked"); @@ -570,7 +570,7 @@ impl TextureTracker { self.set_size(incoming_size); } - for index in iterate_bitvec_indices(&tracker.metadata.owned) { + for index in tracker.metadata.owned_indices() { let index32 = index as u32; self.tracker_assert_in_bounds(index); @@ -616,7 +616,7 @@ impl TextureTracker { self.set_size(incoming_size); } - for index in iterate_bitvec_indices(&scope.metadata.owned) { + for index in scope.metadata.owned_indices() { let index32 = index as u32; self.tracker_assert_in_bounds(index); @@ -674,25 +674,28 @@ impl TextureTracker { let index = index32 as usize; scope.tracker_assert_in_bounds(index); - if !scope.metadata.owned.get(index).unwrap_unchecked() { + if unsafe { !scope.metadata.contains_unchecked(index) } { continue; } - insert_or_barrier_update( - texture_data_from_texture(storage, index32), - Some(&mut self.start_set), - &mut self.end_set, - &mut self.metadata, - index32, - index, - TextureStateProvider::TextureSet { set: &scope.set }, - None, - ResourceMetadataProvider::Indirect { - metadata: &scope.metadata, - }, - &mut self.temp, - ); + let texture_data = unsafe { texture_data_from_texture(storage, index32) }; + unsafe { + insert_or_barrier_update( + texture_data, + Some(&mut self.start_set), + &mut self.end_set, + &mut self.metadata, + index32, + index, + TextureStateProvider::TextureSet { set: &scope.set }, + None, + ResourceMetadataProvider::Indirect { + metadata: &scope.metadata, + }, + &mut self.temp, + ) + }; - scope.metadata.reset(index); + unsafe { scope.metadata.remove(index) }; } } @@ -706,21 +709,21 @@ impl TextureTracker { let (index32, epoch, _) = id.0.unzip(); let index = index32 as usize; - if index > self.metadata.owned.len() { 
+ if index > self.metadata.size() { return false; } self.tracker_assert_in_bounds(index); unsafe { - if self.metadata.owned.get(index).unwrap_unchecked() { - let existing_epoch = *self.metadata.epochs.get_unchecked_mut(index); + if self.metadata.contains_unchecked(index) { + let existing_epoch = self.metadata.get_epoch_unchecked(index); assert_eq!(existing_epoch, epoch); self.start_set.complex.remove(&index32); self.end_set.complex.remove(&index32); - self.metadata.reset(index); + self.metadata.remove(index); return true; } @@ -740,24 +743,22 @@ impl TextureTracker { let (index32, epoch, _) = id.0.unzip(); let index = index32 as usize; - if index > self.metadata.owned.len() { + if index > self.metadata.size() { return false; } self.tracker_assert_in_bounds(index); unsafe { - if self.metadata.owned.get(index).unwrap_unchecked() { - let existing_epoch = self.metadata.epochs.get_unchecked_mut(index); - let existing_ref_count = self.metadata.ref_counts.get_unchecked_mut(index); + if self.metadata.contains_unchecked(index) { + let existing_epoch = self.metadata.get_epoch_unchecked(index); + let existing_ref_count = self.metadata.get_ref_count_unchecked(index); - if *existing_epoch == epoch - && existing_ref_count.as_mut().unwrap_unchecked().load() == 1 - { + if existing_epoch == epoch && existing_ref_count.load() == 1 { self.start_set.complex.remove(&index32); self.end_set.complex.remove(&index32); - self.metadata.reset(index); + self.metadata.remove(index); return true; } @@ -854,10 +855,10 @@ impl<'a> TextureStateProvider<'a> { } } TextureStateProvider::TextureSet { set } => { - let new_state = *set.simple.get_unchecked(index); + let new_state = *unsafe { set.simple.get_unchecked(index) }; if new_state == TextureUses::COMPLEX { - let new_complex = set.complex.get(&index32).unwrap_unchecked(); + let new_complex = unsafe { set.complex.get(&index32).unwrap_unchecked() }; SingleOrManyStates::Many(EitherIter::Right( new_complex.to_selector_state_iter(), @@ -877,7 +878,7 @@ 
unsafe fn texture_data_from_texture( storage: &hub::Storage, TextureId>, index32: u32, ) -> (&LifeGuard, &TextureSelector) { - let texture = storage.get_unchecked(index32); + let texture = unsafe { storage.get_unchecked(index32) }; (&texture.life_guard, &texture.full_range) } @@ -900,31 +901,35 @@ unsafe fn insert_or_merge( state_provider: TextureStateProvider<'_>, metadata_provider: ResourceMetadataProvider<'_, A>, ) -> Result<(), UsageConflict> { - let currently_owned = resource_metadata.owned.get(index).unwrap_unchecked(); + let currently_owned = unsafe { resource_metadata.contains_unchecked(index) }; if !currently_owned { - insert( - Some(texture_data), - None, + unsafe { + insert( + Some(texture_data), + None, + current_state_set, + resource_metadata, + index32, + index, + state_provider, + None, + metadata_provider, + ) + }; + return Ok(()); + } + + unsafe { + merge( + texture_data, current_state_set, - resource_metadata, index32, index, state_provider, - None, metadata_provider, - ); - return Ok(()); + ) } - - merge( - texture_data, - current_state_set, - index32, - index, - state_provider, - metadata_provider, - ) } /// If the resource isn't tracked @@ -957,42 +962,48 @@ unsafe fn insert_or_barrier_update( metadata_provider: ResourceMetadataProvider<'_, A>, barriers: &mut Vec>, ) { - let currently_owned = resource_metadata.owned.get(index).unwrap_unchecked(); + let currently_owned = unsafe { resource_metadata.contains_unchecked(index) }; if !currently_owned { - insert( - Some(texture_data), - start_state, - current_state_set, - resource_metadata, - index32, - index, - start_state_provider, - end_state_provider, - metadata_provider, - ); + unsafe { + insert( + Some(texture_data), + start_state, + current_state_set, + resource_metadata, + index32, + index, + start_state_provider, + end_state_provider, + metadata_provider, + ) + }; return; } let update_state_provider = end_state_provider.unwrap_or_else(|| start_state_provider.clone()); - barrier( - 
texture_data, - current_state_set, - index32, - index, - start_state_provider, - barriers, - ); + unsafe { + barrier( + texture_data, + current_state_set, + index32, + index, + start_state_provider, + barriers, + ) + }; let start_state_set = start_state.unwrap(); - update( - texture_data, - start_state_set, - current_state_set, - index32, - index, - update_state_provider, - ); + unsafe { + update( + texture_data, + start_state_set, + current_state_set, + index32, + index, + update_state_provider, + ) + }; } #[inline(always)] @@ -1007,7 +1018,7 @@ unsafe fn insert( end_state_provider: Option>, metadata_provider: ResourceMetadataProvider<'_, A>, ) { - let start_layers = start_state_provider.get_state(texture_data, index32, index); + let start_layers = unsafe { start_state_provider.get_state(texture_data, index32, index) }; match start_layers { SingleOrManyStates::Single(state) => { // This should only ever happen with a wgpu bug, but let's just double @@ -1017,36 +1028,37 @@ unsafe fn insert( log::trace!("\ttex {index32}: insert start {state:?}"); if let Some(start_state) = start_state { - *start_state.simple.get_unchecked_mut(index) = state; + unsafe { *start_state.simple.get_unchecked_mut(index) = state }; } // We only need to insert ourselves the end state if there is no end state provider. 
if end_state_provider.is_none() { - *end_state.simple.get_unchecked_mut(index) = state; + unsafe { *end_state.simple.get_unchecked_mut(index) = state }; } } SingleOrManyStates::Many(state_iter) => { let full_range = texture_data.unwrap().1.clone(); - let complex = ComplexTextureState::from_selector_state_iter(full_range, state_iter); + let complex = + unsafe { ComplexTextureState::from_selector_state_iter(full_range, state_iter) }; log::trace!("\ttex {index32}: insert start {complex:?}"); if let Some(start_state) = start_state { - *start_state.simple.get_unchecked_mut(index) = TextureUses::COMPLEX; + unsafe { *start_state.simple.get_unchecked_mut(index) = TextureUses::COMPLEX }; start_state.complex.insert(index32, complex.clone()); } // We only need to insert ourselves the end state if there is no end state provider. if end_state_provider.is_none() { - *end_state.simple.get_unchecked_mut(index) = TextureUses::COMPLEX; + unsafe { *end_state.simple.get_unchecked_mut(index) = TextureUses::COMPLEX }; end_state.complex.insert(index32, complex); } } } if let Some(end_state_provider) = end_state_provider { - match end_state_provider.get_state(texture_data, index32, index) { + match unsafe { end_state_provider.get_state(texture_data, index32, index) } { SingleOrManyStates::Single(state) => { // This should only ever happen with a wgpu bug, but let's just double // check that resource states don't have any conflicts. @@ -1056,29 +1068,30 @@ unsafe fn insert( // We only need to insert into the end, as there is guarenteed to be // a start state provider. 
- *end_state.simple.get_unchecked_mut(index) = state; + unsafe { *end_state.simple.get_unchecked_mut(index) = state }; } SingleOrManyStates::Many(state_iter) => { let full_range = texture_data.unwrap().1.clone(); - let complex = ComplexTextureState::from_selector_state_iter(full_range, state_iter); + let complex = unsafe { + ComplexTextureState::from_selector_state_iter(full_range, state_iter) + }; log::trace!("\ttex {index32}: insert end {complex:?}"); // We only need to insert into the end, as there is guarenteed to be // a start state provider. - *end_state.simple.get_unchecked_mut(index) = TextureUses::COMPLEX; + unsafe { *end_state.simple.get_unchecked_mut(index) = TextureUses::COMPLEX }; end_state.complex.insert(index32, complex); } } } - let (epoch, ref_count) = - metadata_provider.get_own(texture_data.map(|(life_guard, _)| life_guard), index); - - resource_metadata.owned.set(index, true); - *resource_metadata.epochs.get_unchecked_mut(index) = epoch; - *resource_metadata.ref_counts.get_unchecked_mut(index) = Some(ref_count); + unsafe { + let (epoch, ref_count) = + metadata_provider.get_own(texture_data.map(|(life_guard, _)| life_guard), index); + resource_metadata.insert(index, epoch, ref_count); + } } #[inline(always)] @@ -1090,19 +1103,19 @@ unsafe fn merge( state_provider: TextureStateProvider<'_>, metadata_provider: ResourceMetadataProvider<'_, A>, ) -> Result<(), UsageConflict> { - let current_simple = current_state_set.simple.get_unchecked_mut(index); + let current_simple = unsafe { current_state_set.simple.get_unchecked_mut(index) }; let current_state = if *current_simple == TextureUses::COMPLEX { - SingleOrManyStates::Many( + SingleOrManyStates::Many(unsafe { current_state_set .complex .get_mut(&index32) - .unwrap_unchecked(), - ) + .unwrap_unchecked() + }) } else { SingleOrManyStates::Single(current_simple) }; - let new_state = state_provider.get_state(Some(texture_data), index32, index); + let new_state = unsafe { 
state_provider.get_state(Some(texture_data), index32, index) }; match (current_state, new_state) { (SingleOrManyStates::Single(current_simple), SingleOrManyStates::Single(new_simple)) => { @@ -1112,7 +1125,11 @@ unsafe fn merge( if invalid_resource_state(merged_state) { return Err(UsageConflict::from_texture( - TextureId::zip(index32, metadata_provider.get_epoch(index), A::VARIANT), + TextureId::zip( + index32, + unsafe { metadata_provider.get_epoch(index) }, + A::VARIANT, + ), texture_data.1.clone(), *current_simple, new_simple, @@ -1122,12 +1139,15 @@ unsafe fn merge( *current_simple = merged_state; } (SingleOrManyStates::Single(current_simple), SingleOrManyStates::Many(new_many)) => { - // Because we are now demoting this simple state to a complex state, we actually need to make a whole - // new complex state for us to use as there wasn't one before. - let mut new_complex = ComplexTextureState::from_selector_state_iter( - texture_data.1.clone(), - iter::once((texture_data.1.clone(), *current_simple)), - ); + // Because we are now demoting this simple state to a complex state, + // we actually need to make a whole new complex state for us to use + // as there wasn't one before. 
+ let mut new_complex = unsafe { + ComplexTextureState::from_selector_state_iter( + texture_data.1.clone(), + iter::once((texture_data.1.clone(), *current_simple)), + ) + }; for (selector, new_state) in new_many { let merged_state = *current_simple | new_state; @@ -1138,7 +1158,11 @@ unsafe fn merge( if invalid_resource_state(merged_state) { return Err(UsageConflict::from_texture( - TextureId::zip(index32, metadata_provider.get_epoch(index), A::VARIANT), + TextureId::zip( + index32, + unsafe { metadata_provider.get_epoch(index) }, + A::VARIANT, + ), selector, *current_simple, new_state, @@ -1168,7 +1192,8 @@ unsafe fn merge( for &mut (ref layers, ref mut current_layer_state) in mip.iter_mut() { let merged_state = *current_layer_state | new_simple; - // Once we remove unknown, this will never be empty, as simple states are never unknown. + // Once we remove unknown, this will never be empty, as + // simple states are never unknown. let merged_state = merged_state - TextureUses::UNKNOWN; log::trace!( @@ -1178,7 +1203,11 @@ unsafe fn merge( if invalid_resource_state(merged_state) { return Err(UsageConflict::from_texture( - TextureId::zip(index32, metadata_provider.get_epoch(index), A::VARIANT), + TextureId::zip( + index32, + unsafe { metadata_provider.get_epoch(index) }, + A::VARIANT, + ), TextureSelector { mips: mip_id..mip_id + 1, layers: layers.clone(), @@ -1199,7 +1228,7 @@ unsafe fn merge( for mip_id in selector.mips { strict_assert!((mip_id as usize) < current_complex.mips.len()); - let mip = current_complex.mips.get_unchecked_mut(mip_id as usize); + let mip = unsafe { current_complex.mips.get_unchecked_mut(mip_id as usize) }; for &mut (ref layers, ref mut current_layer_state) in mip.isolate(&selector.layers, TextureUses::UNKNOWN) @@ -1221,7 +1250,7 @@ unsafe fn merge( return Err(UsageConflict::from_texture( TextureId::zip( index32, - metadata_provider.get_epoch(index), + unsafe { metadata_provider.get_epoch(index) }, A::VARIANT, ), TextureSelector { @@ -1252,14 
+1281,16 @@ unsafe fn barrier( state_provider: TextureStateProvider<'_>, barriers: &mut Vec>, ) { - let current_simple = *current_state_set.simple.get_unchecked(index); + let current_simple = unsafe { *current_state_set.simple.get_unchecked(index) }; let current_state = if current_simple == TextureUses::COMPLEX { - SingleOrManyStates::Many(current_state_set.complex.get(&index32).unwrap_unchecked()) + SingleOrManyStates::Many(unsafe { + current_state_set.complex.get(&index32).unwrap_unchecked() + }) } else { SingleOrManyStates::Single(current_simple) }; - let new_state = state_provider.get_state(Some(texture_data), index32, index); + let new_state = unsafe { state_provider.get_state(Some(texture_data), index32, index) }; match (current_state, new_state) { (SingleOrManyStates::Single(current_simple), SingleOrManyStates::Single(new_simple)) => { @@ -1330,7 +1361,7 @@ unsafe fn barrier( for mip_id in selector.mips { strict_assert!((mip_id as usize) < current_complex.mips.len()); - let mip = current_complex.mips.get_unchecked(mip_id as usize); + let mip = unsafe { current_complex.mips.get_unchecked(mip_id as usize) }; for (layers, current_layer_state) in mip.iter_filter(&selector.layers) { if *current_layer_state == TextureUses::UNKNOWN @@ -1373,41 +1404,45 @@ unsafe fn update( index: usize, state_provider: TextureStateProvider<'_>, ) { - let start_simple = *start_state_set.simple.get_unchecked(index); + let start_simple = unsafe { *start_state_set.simple.get_unchecked(index) }; // We only ever need to update the start state here if the state is complex. // // If the state is simple, the first insert to the tracker would cover it. 
let mut start_complex = None; if start_simple == TextureUses::COMPLEX { - start_complex = Some(start_state_set.complex.get_mut(&index32).unwrap_unchecked()); + start_complex = + Some(unsafe { start_state_set.complex.get_mut(&index32).unwrap_unchecked() }); } - let current_simple = current_state_set.simple.get_unchecked_mut(index); + let current_simple = unsafe { current_state_set.simple.get_unchecked_mut(index) }; let current_state = if *current_simple == TextureUses::COMPLEX { - SingleOrManyStates::Many( + SingleOrManyStates::Many(unsafe { current_state_set .complex .get_mut(&index32) - .unwrap_unchecked(), - ) + .unwrap_unchecked() + }) } else { SingleOrManyStates::Single(current_simple) }; - let new_state = state_provider.get_state(Some(texture_data), index32, index); + let new_state = unsafe { state_provider.get_state(Some(texture_data), index32, index) }; match (current_state, new_state) { (SingleOrManyStates::Single(current_simple), SingleOrManyStates::Single(new_simple)) => { *current_simple = new_simple; } (SingleOrManyStates::Single(current_simple), SingleOrManyStates::Many(new_many)) => { - // Because we are now demoting this simple state to a complex state, we actually need to make a whole - // new complex state for us to use as there wasn't one before. - let mut new_complex = ComplexTextureState::from_selector_state_iter( - texture_data.1.clone(), - iter::once((texture_data.1.clone(), *current_simple)), - ); + // Because we are now demoting this simple state to a complex state, + // we actually need to make a whole new complex state for us to use + // as there wasn't one before. 
+ let mut new_complex = unsafe { + ComplexTextureState::from_selector_state_iter( + texture_data.1.clone(), + iter::once((texture_data.1.clone(), *current_simple)), + ) + }; for (selector, mut new_state) in new_many { if new_state == TextureUses::UNKNOWN { @@ -1437,7 +1472,7 @@ unsafe fn update( if let Some(&mut ref mut start_complex) = start_complex { strict_assert!(mip_id < start_complex.mips.len()); - let start_mip = start_complex.mips.get_unchecked_mut(mip_id); + let start_mip = unsafe { start_complex.mips.get_unchecked_mut(mip_id) }; for &mut (_, ref mut current_start_state) in start_mip.isolate(layers, TextureUses::UNKNOWN) @@ -1452,11 +1487,13 @@ unsafe fn update( } } - *current_state_set.simple.get_unchecked_mut(index) = new_single; - current_state_set - .complex - .remove(&index32) - .unwrap_unchecked(); + unsafe { *current_state_set.simple.get_unchecked_mut(index) = new_single }; + unsafe { + current_state_set + .complex + .remove(&index32) + .unwrap_unchecked() + }; } (SingleOrManyStates::Many(current_complex), SingleOrManyStates::Many(new_many)) => { for (selector, new_state) in new_many { @@ -1469,7 +1506,7 @@ unsafe fn update( let mip_id = mip_id as usize; strict_assert!(mip_id < current_complex.mips.len()); - let mip = current_complex.mips.get_unchecked_mut(mip_id); + let mip = unsafe { current_complex.mips.get_unchecked_mut(mip_id) }; for &mut (ref layers, ref mut current_layer_state) in mip.isolate(&selector.layers, TextureUses::UNKNOWN) @@ -1477,18 +1514,20 @@ unsafe fn update( if *current_layer_state == TextureUses::UNKNOWN && new_state != TextureUses::UNKNOWN { - // We now know something about this subresource that we didn't before - // so we should go back and update the start state. - - // We know we must have starter state be complex, otherwise we would know - // about this state. + // We now know something about this subresource that + // we didn't before so we should go back and update + // the start state. 
+ // + // We know we must have starter state be complex, + // otherwise we would know about this state. strict_assert!(start_complex.is_some()); - let start_complex = start_complex.as_deref_mut().unwrap_unchecked(); + let start_complex = + unsafe { start_complex.as_deref_mut().unwrap_unchecked() }; strict_assert!(mip_id < start_complex.mips.len()); - let start_mip = start_complex.mips.get_unchecked_mut(mip_id); + let start_mip = unsafe { start_complex.mips.get_unchecked_mut(mip_id) }; for &mut (_, ref mut current_start_state) in start_mip.isolate(layers, TextureUses::UNKNOWN) diff --git a/wgpu-core/src/validation.rs b/wgpu-core/src/validation.rs index a6dee7d974..3fa41032d1 100644 --- a/wgpu-core/src/validation.rs +++ b/wgpu-core/src/validation.rs @@ -236,10 +236,11 @@ pub enum StageError { #[error("shader module is invalid")] InvalidModule, #[error( - "shader entry point current workgroup size {current:?} must be less or equal to {limit:?} of total {total}" + "shader entry point's workgroup size {current:?} ({current_total} total invocations) must be less or equal to the per-dimension limit {limit:?} and the total invocation limit {total}" )] InvalidWorkgroupSize { current: [u32; 3], + current_total: u32, limit: [u32; 3], total: u32, }, @@ -1098,6 +1099,7 @@ impl Interface { { return Err(StageError::InvalidWorkgroupSize { current: entry_point.workgroup_size, + current_total: total_invocations, limit: max_workgroup_size_limits, total: self.limits.max_compute_invocations_per_workgroup, }); @@ -1166,7 +1168,8 @@ impl Interface { // Check all vertex outputs and make sure the fragment shader consumes them. if shader_stage == naga::ShaderStage::Fragment { for &index in inputs.keys() { - // This is a linear scan, but the count should be low enough that this should be fine. + // This is a linear scan, but the count should be low enough + // that this should be fine. let found = entry_point.inputs.iter().any(|v| match *v { Varying::Local { location, .. 
} => location == index, Varying::BuiltIn(_) => false, diff --git a/wgpu-hal/Cargo.toml b/wgpu-hal/Cargo.toml index b68d30f13e..aa8bbcada9 100644 --- a/wgpu-hal/Cargo.toml +++ b/wgpu-hal/Cargo.toml @@ -1,19 +1,33 @@ [package] name = "wgpu-hal" -version = "0.14.0" -authors = ["wgpu developers"] -edition = "2021" +version.workspace = true +authors.workspace = true +edition.workspace = true description = "WebGPU hardware abstraction layer" -homepage = "https://github.com/gfx-rs/wgpu" -repository = "https://github.com/gfx-rs/wgpu" -keywords = ["graphics"] -license = "MIT OR Apache-2.0" +homepage.workspace = true +repository.workspace = true +keywords.workspace = true +license.workspace = true + +# Override the workspace's `rust-version` key. Firefox uses `cargo vendor` to +# copy the crates it actually uses out of the workspace, so it's meaningful for +# them to have less restrictive MSRVs individually than the workspace as a +# whole, if their code permits. See `../README.md` for details. rust-version = "1.60" +[package.metadata.docs.rs] +# Ideally we would enable all the features. +# +# However the metal features fails to be documented because the docs.rs runner cross compiling under +# x86_64-unknown-linux-gnu and metal-rs cannot compile in that environment at the moment. The same applies +# with the dx11 and dx12 features. 
+features = ["vulkan", "gles", "renderdoc"] +rustdoc-args = ["--cfg", "docsrs"] + [lib] [features] -default = [] +default = ["gles"] metal = ["naga/msl-out", "block", "foreign-types"] vulkan = ["naga/spv-out", "ash", "gpu-alloc", "gpu-descriptor", "libloading", "smallvec"] gles = ["naga/glsl-out", "glow", "egl", "libloading"] @@ -30,88 +44,76 @@ name = "raw-gles" required-features = ["gles"] [dependencies] -bitflags = "1.0" -# parking_lot 0.12 switches from `winapi` to `windows`; permit either -parking_lot = ">=0.11,<0.13" -profiling = { version = "1", default-features = false } -raw-window-handle = "0.5" -thiserror = "1" +bitflags.workspace = true +parking_lot.workspace = true +profiling.workspace = true +raw-window-handle.workspace = true +thiserror.workspace = true # backends common -arrayvec = "0.7" -fxhash = "0.2.1" -log = "0.4" -renderdoc-sys = { version = "0.7.1", optional = true } +arrayvec.workspace = true +fxhash.workspace = true +log.workspace = true +renderdoc-sys = { workspace = true, optional = true } # backend: Metal -block = { version = "0.1", optional = true } -foreign-types = { version = "0.3", optional = true } +block = { workspace = true, optional = true } +foreign-types = { workspace = true, optional = true } # backend: Vulkan -ash = { version = "0.37", optional = true } -gpu-alloc = { version = "0.5", optional = true } -gpu-descriptor = { version = "0.2", optional = true } -smallvec = { version = "1", optional = true, features = ["union"] } +ash = { workspace = true, optional = true } +gpu-alloc = { workspace = true, optional = true } +gpu-descriptor = { workspace = true, optional = true } +smallvec = { workspace = true, optional = true, features = ["union"] } # backend: Gles -#glow = { version = "0.11.2", optional = true } -# TODO: New glow release -glow = { git = "https://github.com/grovesNL/glow/", rev = "c8a011fcd57a5c68cc917ed394baa484bdefc909", optional = true } +glow = { workspace = true, optional = true } # backend: Dx12 
-bit-set = { version = "0.5", optional = true } -range-alloc = { version = "0.1", optional = true } +bit-set = { workspace = true, optional = true } +range-alloc = { workspace = true, optional = true } [dependencies.wgt] -package = "wgpu-types" -path = "../wgpu-types" -version = "0.14" +workspace = true [target.'cfg(not(target_arch = "wasm32"))'.dependencies] -egl = { package = "khronos-egl", version = "4.1", features = ["dynamic"], optional = true } -#Note: it's only unused on Apple platforms -libloading = { version = "0.7", optional = true } +egl = { workspace = true, features = ["dynamic"], optional = true } +libloading = { workspace = true, optional = true } [target.'cfg(target_os = "emscripten")'.dependencies] -egl = { package = "khronos-egl", version = "4.1", features = ["static", "no-pkg-config"] } +egl = { workspace = true, features = ["static", "no-pkg-config"] } #Note: it's unused by emscripten, but we keep it to have single code base in egl.rs -libloading = { version = "0.7", optional = true } +libloading = { workspace = true, optional = true } [target.'cfg(windows)'.dependencies] -winapi = { version = "0.3", features = ["libloaderapi", "windef", "winuser", "dcomp"] } -native = { package = "d3d12", version = "0.5.0", features = ["libloading"], optional = true } -# native = { package = "d3d12", git = "https://github.com/gfx-rs/d3d12-rs.git", rev = "ffe5e261da0a6cb85332b82ab310abd2a7e849f6", features = ["libloading"], optional = true } +winapi = { workspace = true, features = ["libloaderapi", "windef", "winuser", "dcomp"] } +native = { workspace = true, features = ["libloading"], optional = true } [target.'cfg(any(target_os="macos", target_os="ios"))'.dependencies] -mtl = { package = "metal", version = "0.24.0" } -# mtl = { package = "metal", git = "https://github.com/gfx-rs/metal-rs", rev = "1aaa903" } -objc = "0.2.5" -core-graphics-types = "0.1" +mtl.workspace = true +objc.workspace = true +core-graphics-types.workspace = true 
[target.'cfg(all(target_arch = "wasm32", not(target_os = "emscripten")))'.dependencies] -wasm-bindgen = { version = "0.2" } -web-sys = { version = "=0.3.60", features = ["Window", "HtmlCanvasElement", "WebGl2RenderingContext", "OffscreenCanvas"] } -js-sys = { version = "0.3" } +wasm-bindgen.workspace = true +web-sys = { workspace = true, features = ["Window", "HtmlCanvasElement", "WebGl2RenderingContext", "OffscreenCanvas"] } +js-sys.workspace = true [target.'cfg(target_os = "android")'.dependencies] -android_system_properties = "0.1.1" +android_system_properties.workspace = true [dependencies.naga] -git = "https://github.com/gfx-rs/naga" -rev = "c52d9102" -version = "0.10" +workspace = true features = ["clone"] # DEV dependencies - [dev-dependencies.naga] -git = "https://github.com/gfx-rs/naga" -rev = "c52d9102" +workspace = true features = ["wgsl-in"] [dev-dependencies] -env_logger = "0.9" -winit = "0.27.1" # for "halmark" example +env_logger.workspace = true +winit.workspace = true # for "halmark" example [target.'cfg(not(target_arch = "wasm32"))'.dev-dependencies] -glutin = "0.29.1" # for "gles" example +glutin.workspace = true # for "gles" example diff --git a/wgpu-hal/examples/raw-gles.rs b/wgpu-hal/examples/raw-gles.rs index 1620794fb0..4c793bba68 100644 --- a/wgpu-hal/examples/raw-gles.rs +++ b/wgpu-hal/examples/raw-gles.rs @@ -48,7 +48,7 @@ fn main() { *control_flow = ControlFlow::Wait; match event { - Event::LoopDestroyed => return, + Event::LoopDestroyed => (), Event::WindowEvent { event, .. 
} => match event { WindowEvent::CloseRequested | WindowEvent::KeyboardInput { diff --git a/wgpu-hal/src/auxil/dxgi/exception.rs b/wgpu-hal/src/auxil/dxgi/exception.rs index 31d5e6933a..fceac7db5f 100644 --- a/wgpu-hal/src/auxil/dxgi/exception.rs +++ b/wgpu-hal/src/auxil/dxgi/exception.rs @@ -46,21 +46,23 @@ unsafe extern "system" fn output_debug_string_handler( exception_info: *mut winnt::EXCEPTION_POINTERS, ) -> i32 { // See https://stackoverflow.com/a/41480827 - let record = &*(*exception_info).ExceptionRecord; + let record = unsafe { &*(*exception_info).ExceptionRecord }; if record.NumberParameters != 2 { return excpt::EXCEPTION_CONTINUE_SEARCH; } let message = match record.ExceptionCode { - winnt::DBG_PRINTEXCEPTION_C => String::from_utf8_lossy(slice::from_raw_parts( - record.ExceptionInformation[1] as *const u8, - record.ExceptionInformation[0], - )), - winnt::DBG_PRINTEXCEPTION_WIDE_C => { - Cow::Owned(String::from_utf16_lossy(slice::from_raw_parts( + winnt::DBG_PRINTEXCEPTION_C => String::from_utf8_lossy(unsafe { + slice::from_raw_parts( + record.ExceptionInformation[1] as *const u8, + record.ExceptionInformation[0], + ) + }), + winnt::DBG_PRINTEXCEPTION_WIDE_C => Cow::Owned(String::from_utf16_lossy(unsafe { + slice::from_raw_parts( record.ExceptionInformation[1] as *const u16, record.ExceptionInformation[0], - ))) - } + ) + })), _ => return excpt::EXCEPTION_CONTINUE_SEARCH, }; diff --git a/wgpu-hal/src/auxil/renderdoc.rs b/wgpu-hal/src/auxil/renderdoc.rs index 712eac4180..b2e9242a89 100644 --- a/wgpu-hal/src/auxil/renderdoc.rs +++ b/wgpu-hal/src/auxil/renderdoc.rs @@ -44,12 +44,13 @@ impl RenderDoc { let renderdoc_filename = "libVkLayer_GLES_RenderDoc.so"; #[cfg(unix)] - let renderdoc_result: Result = + let renderdoc_result: Result = unsafe { libloading::os::unix::Library::open( Some(renderdoc_filename), libloading::os::unix::RTLD_NOW | RTLD_NOLOAD, ) - .map(|lib| lib.into()); + } + .map(|lib| lib.into()); #[cfg(windows)] let renderdoc_result: Result = @@ 
-68,22 +69,23 @@ impl RenderDoc { } }; - let get_api: libloading::Symbol = match renderdoc_lib.get(b"RENDERDOC_GetAPI\0") { - Ok(api) => api, - Err(e) => { - return RenderDoc::NotAvailable { - reason: format!( - "Unable to get RENDERDOC_GetAPI from renderdoc library '{}': {:?}", - renderdoc_filename, e - ), + let get_api: libloading::Symbol = + match unsafe { renderdoc_lib.get(b"RENDERDOC_GetAPI\0") } { + Ok(api) => api, + Err(e) => { + return RenderDoc::NotAvailable { + reason: format!( + "Unable to get RENDERDOC_GetAPI from renderdoc library '{}': {:?}", + renderdoc_filename, e + ), + } } - } - }; + }; let mut obj = ptr::null_mut(); - match get_api(10401, &mut obj) { + match unsafe { get_api(10401, &mut obj) } { 1 => RenderDoc::Available { api: RenderDocApi { - api: *(obj as *mut renderdoc_sys::RENDERDOC_API_1_4_1), + api: unsafe { *(obj as *mut renderdoc_sys::RENDERDOC_API_1_4_1) }, lib: renderdoc_lib, }, }, @@ -115,7 +117,7 @@ impl RenderDoc { pub unsafe fn start_frame_capture(&self, device_handle: Handle, window_handle: Handle) -> bool { match *self { Self::Available { api: ref entry } => { - entry.api.StartFrameCapture.unwrap()(device_handle, window_handle); + unsafe { entry.api.StartFrameCapture.unwrap()(device_handle, window_handle) }; true } Self::NotAvailable { ref reason } => { @@ -129,7 +131,7 @@ impl RenderDoc { pub unsafe fn end_frame_capture(&self, device_handle: Handle, window_handle: Handle) { match *self { Self::Available { api: ref entry } => { - entry.api.EndFrameCapture.unwrap()(device_handle, window_handle); + unsafe { entry.api.EndFrameCapture.unwrap()(device_handle, window_handle) }; } Self::NotAvailable { ref reason } => { log::warn!("Could not end RenderDoc frame capture: {}", reason) diff --git a/wgpu-hal/src/dx11/adapter.rs b/wgpu-hal/src/dx11/adapter.rs index 9ea3243ab8..d30ba8fa90 100644 --- a/wgpu-hal/src/dx11/adapter.rs +++ b/wgpu-hal/src/dx11/adapter.rs @@ -91,8 +91,9 @@ impl super::Adapter { | wgt::Features::CLEAR_TEXTURE | 
wgt::Features::TEXTURE_FORMAT_16BIT_NORM | wgt::Features::ADDRESS_MODE_CLAMP_TO_ZERO; - let mut downlevel = - wgt::DownlevelFlags::BASE_VERTEX | wgt::DownlevelFlags::READ_ONLY_DEPTH_STENCIL; + let mut downlevel = wgt::DownlevelFlags::BASE_VERTEX + | wgt::DownlevelFlags::READ_ONLY_DEPTH_STENCIL + | wgt::DownlevelFlags::UNRESTRICTED_INDEX_BUFFER; // Features from queries downlevel.set( diff --git a/wgpu-hal/src/dx11/device.rs b/wgpu-hal/src/dx11/device.rs index 7b095ba1df..3b087c4311 100644 --- a/wgpu-hal/src/dx11/device.rs +++ b/wgpu-hal/src/dx11/device.rs @@ -227,14 +227,16 @@ impl crate::Queue for super::Queue { impl super::D3D11Device { #[allow(trivial_casts)] // come on pub unsafe fn check_feature_support(&self, feature: d3d11::D3D11_FEATURE) -> T { - let mut value = mem::zeroed::(); - let ret = self.CheckFeatureSupport( - feature, - &mut value as *mut T as *mut c_void, - mem::size_of::() as u32, - ); - assert_eq!(ret.into_result(), Ok(())); - - value + unsafe { + let mut value = mem::zeroed::(); + let ret = self.CheckFeatureSupport( + feature, + &mut value as *mut T as *mut c_void, + mem::size_of::() as u32, + ); + assert_eq!(ret.into_result(), Ok(())); + + value + } } } diff --git a/wgpu-hal/src/dx12/adapter.rs b/wgpu-hal/src/dx12/adapter.rs index edbda11d2f..60efdffab4 100644 --- a/wgpu-hal/src/dx12/adapter.rs +++ b/wgpu-hal/src/dx12/adapter.rs @@ -2,9 +2,9 @@ use crate::{ auxil::{self, dxgi::result::HResult as _}, dx12::SurfaceTarget, }; -use std::{mem, sync::Arc, thread}; +use std::{mem, ptr, sync::Arc, thread}; use winapi::{ - shared::{dxgi, dxgi1_2, windef, winerror}, + shared::{dxgi, dxgi1_2, minwindef::DWORD, windef, winerror}, um::{d3d12, d3d12sdklayers, winuser}, }; @@ -29,15 +29,17 @@ impl Drop for super::Adapter { impl super::Adapter { pub unsafe fn report_live_objects(&self) { - if let Ok(debug_device) = self - .raw - .cast::() - .into_result() - { - debug_device.ReportLiveDeviceObjects( - d3d12sdklayers::D3D12_RLDO_SUMMARY | 
d3d12sdklayers::D3D12_RLDO_IGNORE_INTERNAL, - ); - debug_device.destroy(); + if let Ok(debug_device) = unsafe { + self.raw + .cast::() + .into_result() + } { + unsafe { + debug_device.ReportLiveDeviceObjects( + d3d12sdklayers::D3D12_RLDO_SUMMARY | d3d12sdklayers::D3D12_RLDO_IGNORE_INTERNAL, + ) + }; + unsafe { debug_device.destroy() }; } } @@ -191,7 +193,6 @@ impl super::Adapter { let mut features = wgt::Features::empty() | wgt::Features::DEPTH_CLIP_CONTROL - | wgt::Features::DEPTH24PLUS_STENCIL8 | wgt::Features::DEPTH32FLOAT_STENCIL8 | wgt::Features::INDIRECT_FIRST_INSTANCE | wgt::Features::MAPPABLE_PRIMARY_BUFFERS @@ -366,35 +367,34 @@ impl crate::Adapter for super::Adapter { let mut data = d3d12::D3D12_FEATURE_DATA_FORMAT_SUPPORT { Format: raw_format, - Support1: mem::zeroed(), - Support2: mem::zeroed(), + Support1: unsafe { mem::zeroed() }, + Support2: unsafe { mem::zeroed() }, }; - assert_eq!( - winerror::S_OK, + assert_eq!(winerror::S_OK, unsafe { self.device.CheckFeatureSupport( d3d12::D3D12_FEATURE_FORMAT_SUPPORT, &mut data as *mut _ as *mut _, mem::size_of::() as _, ) - ); + }); // Because we use a different format for SRV and UAV views of depth textures, we need to check // the features that use SRV/UAVs using the no-depth format. let mut data_no_depth = d3d12::D3D12_FEATURE_DATA_FORMAT_SUPPORT { Format: no_depth_format, - Support1: mem::zeroed(), - Support2: mem::zeroed(), + Support1: d3d12::D3D12_FORMAT_SUPPORT1_NONE, + Support2: d3d12::D3D12_FORMAT_SUPPORT2_NONE, }; if raw_format != no_depth_format { // Only-recheck if we're using a different format - assert_eq!( - winerror::S_OK, + assert_eq!(winerror::S_OK, unsafe { self.device.CheckFeatureSupport( d3d12::D3D12_FEATURE_FORMAT_SUPPORT, - &mut data_no_depth as *mut _ as *mut _, - mem::size_of::() as _, + ptr::addr_of_mut!(data_no_depth).cast(), + DWORD::try_from(mem::size_of::()) + .unwrap(), ) - ); + }); } else { // Same format, just copy over. 
data_no_depth = data; @@ -447,12 +447,39 @@ impl crate::Adapter for super::Adapter { | d3d12::D3D12_FORMAT_SUPPORT1_DEPTH_STENCIL) != 0 && data.Support1 & d3d12::D3D12_FORMAT_SUPPORT1_MULTISAMPLE_RENDERTARGET == 0; - caps.set(Tfc::MULTISAMPLE, !no_msaa_load && !no_msaa_target); + caps.set( Tfc::MULTISAMPLE_RESOLVE, data.Support1 & d3d12::D3D12_FORMAT_SUPPORT1_MULTISAMPLE_RESOLVE != 0, ); + let mut ms_levels = d3d12::D3D12_FEATURE_DATA_MULTISAMPLE_QUALITY_LEVELS { + Format: raw_format, + SampleCount: 0, + Flags: d3d12::D3D12_MULTISAMPLE_QUALITY_LEVELS_FLAG_NONE, + NumQualityLevels: 0, + }; + + let mut set_sample_count = |sc: u32, tfc: Tfc| { + ms_levels.SampleCount = sc; + + if unsafe { + self.device.CheckFeatureSupport( + d3d12::D3D12_FEATURE_MULTISAMPLE_QUALITY_LEVELS, + <*mut _>::cast(&mut ms_levels), + mem::size_of::() as _, + ) + } == winerror::S_OK + && ms_levels.NumQualityLevels != 0 + { + caps.set(tfc, !no_msaa_load && !no_msaa_target); + } + }; + + set_sample_count(2, Tfc::MULTISAMPLE_X2); + set_sample_count(4, Tfc::MULTISAMPLE_X4); + set_sample_count(8, Tfc::MULTISAMPLE_X8); + caps } @@ -463,8 +490,8 @@ impl crate::Adapter for super::Adapter { let current_extent = { match surface.target { SurfaceTarget::WndHandle(wnd_handle) => { - let mut rect: windef::RECT = mem::zeroed(); - if winuser::GetClientRect(wnd_handle, &mut rect) != 0 { + let mut rect: windef::RECT = unsafe { mem::zeroed() }; + if unsafe { winuser::GetClientRect(wnd_handle, &mut rect) } != 0 { Some(wgt::Extent3d { width: (rect.right - rect.left) as u32, height: (rect.bottom - rect.top) as u32, diff --git a/wgpu-hal/src/dx12/command.rs b/wgpu-hal/src/dx12/command.rs index daeaa96e11..9f879e8b63 100644 --- a/wgpu-hal/src/dx12/command.rs +++ b/wgpu-hal/src/dx12/command.rs @@ -64,7 +64,7 @@ impl super::CommandEncoder { self.pass.kind = kind; if let Some(label) = label { let (wide_label, size) = self.temp.prepare_marker(label); - list.BeginEvent(0, wide_label.as_ptr() as *const _, size); + unsafe { 
list.BeginEvent(0, wide_label.as_ptr() as *const _, size) }; self.pass.has_label = true; } self.pass.dirty_root_elements = 0; @@ -76,7 +76,7 @@ impl super::CommandEncoder { let list = self.list.unwrap(); list.set_descriptor_heaps(&[]); if self.pass.has_label { - list.EndEvent(); + unsafe { list.EndEvent() }; } self.pass.clear(); } @@ -86,11 +86,13 @@ impl super::CommandEncoder { let list = self.list.unwrap(); let index = self.pass.dirty_vertex_buffers.trailing_zeros(); self.pass.dirty_vertex_buffers ^= 1 << index; - list.IASetVertexBuffers( - index, - 1, - self.pass.vertex_buffers.as_ptr().offset(index as isize), - ); + unsafe { + list.IASetVertexBuffers( + index, + 1, + self.pass.vertex_buffers.as_ptr().offset(index as isize), + ); + } } if let Some(root_index) = self.pass.layout.special_constants_root_index { let needs_update = match self.pass.root_elements[root_index as usize] { @@ -244,7 +246,7 @@ impl crate::CommandEncoder for super::CommandEncoder { if let Some(label) = label { let cwstr = conv::map_label(label); - list.SetName(cwstr.as_ptr()); + unsafe { list.SetName(cwstr.as_ptr()) }; } self.list = Some(list); @@ -290,32 +292,38 @@ impl crate::CommandEncoder for super::CommandEncoder { let mut raw = d3d12::D3D12_RESOURCE_BARRIER { Type: d3d12::D3D12_RESOURCE_BARRIER_TYPE_TRANSITION, Flags: d3d12::D3D12_RESOURCE_BARRIER_FLAG_NONE, - u: mem::zeroed(), + u: unsafe { mem::zeroed() }, }; - *raw.u.Transition_mut() = d3d12::D3D12_RESOURCE_TRANSITION_BARRIER { - pResource: barrier.buffer.resource.as_mut_ptr(), - Subresource: d3d12::D3D12_RESOURCE_BARRIER_ALL_SUBRESOURCES, - StateBefore: s0, - StateAfter: s1, + unsafe { + *raw.u.Transition_mut() = d3d12::D3D12_RESOURCE_TRANSITION_BARRIER { + pResource: barrier.buffer.resource.as_mut_ptr(), + Subresource: d3d12::D3D12_RESOURCE_BARRIER_ALL_SUBRESOURCES, + StateBefore: s0, + StateAfter: s1, + } }; self.temp.barriers.push(raw); } else if barrier.usage.start == crate::BufferUses::STORAGE_READ_WRITE { let mut raw = 
d3d12::D3D12_RESOURCE_BARRIER { Type: d3d12::D3D12_RESOURCE_BARRIER_TYPE_UAV, Flags: d3d12::D3D12_RESOURCE_BARRIER_FLAG_NONE, - u: mem::zeroed(), + u: unsafe { mem::zeroed() }, }; - *raw.u.UAV_mut() = d3d12::D3D12_RESOURCE_UAV_BARRIER { - pResource: barrier.buffer.resource.as_mut_ptr(), + unsafe { + *raw.u.UAV_mut() = d3d12::D3D12_RESOURCE_UAV_BARRIER { + pResource: barrier.buffer.resource.as_mut_ptr(), + } }; self.temp.barriers.push(raw); } } if !self.temp.barriers.is_empty() { - self.list - .unwrap() - .ResourceBarrier(self.temp.barriers.len() as u32, self.temp.barriers.as_ptr()); + unsafe { + self.list + .unwrap() + .ResourceBarrier(self.temp.barriers.len() as u32, self.temp.barriers.as_ptr()) + }; } } @@ -340,13 +348,15 @@ impl crate::CommandEncoder for super::CommandEncoder { let mut raw = d3d12::D3D12_RESOURCE_BARRIER { Type: d3d12::D3D12_RESOURCE_BARRIER_TYPE_TRANSITION, Flags: d3d12::D3D12_RESOURCE_BARRIER_FLAG_NONE, - u: mem::zeroed(), + u: unsafe { mem::zeroed() }, }; - *raw.u.Transition_mut() = d3d12::D3D12_RESOURCE_TRANSITION_BARRIER { - pResource: barrier.texture.resource.as_mut_ptr(), - Subresource: d3d12::D3D12_RESOURCE_BARRIER_ALL_SUBRESOURCES, - StateBefore: s0, - StateAfter: s1, + unsafe { + *raw.u.Transition_mut() = d3d12::D3D12_RESOURCE_TRANSITION_BARRIER { + pResource: barrier.texture.resource.as_mut_ptr(), + Subresource: d3d12::D3D12_RESOURCE_BARRIER_ALL_SUBRESOURCES, + StateBefore: s0, + StateAfter: s1, + } }; let mip_level_count = match barrier.range.mip_level_count { @@ -383,12 +393,14 @@ impl crate::CommandEncoder for super::CommandEncoder { for rel_mip_level in 0..mip_level_count { for rel_array_layer in 0..array_layer_count { for plane in planes.clone() { - raw.u.Transition_mut().Subresource = - barrier.texture.calc_subresource( - barrier.range.base_mip_level + rel_mip_level, - barrier.range.base_array_layer + rel_array_layer, - plane, - ); + unsafe { + raw.u.Transition_mut().Subresource = + barrier.texture.calc_subresource( + 
barrier.range.base_mip_level + rel_mip_level, + barrier.range.base_array_layer + rel_array_layer, + plane, + ); + }; self.temp.barriers.push(raw); } } @@ -398,19 +410,23 @@ impl crate::CommandEncoder for super::CommandEncoder { let mut raw = d3d12::D3D12_RESOURCE_BARRIER { Type: d3d12::D3D12_RESOURCE_BARRIER_TYPE_UAV, Flags: d3d12::D3D12_RESOURCE_BARRIER_FLAG_NONE, - u: mem::zeroed(), + u: unsafe { mem::zeroed() }, }; - *raw.u.UAV_mut() = d3d12::D3D12_RESOURCE_UAV_BARRIER { - pResource: barrier.texture.resource.as_mut_ptr(), + unsafe { + *raw.u.UAV_mut() = d3d12::D3D12_RESOURCE_UAV_BARRIER { + pResource: barrier.texture.resource.as_mut_ptr(), + } }; self.temp.barriers.push(raw); } } if !self.temp.barriers.is_empty() { - self.list - .unwrap() - .ResourceBarrier(self.temp.barriers.len() as u32, self.temp.barriers.as_ptr()); + unsafe { + self.list + .unwrap() + .ResourceBarrier(self.temp.barriers.len() as u32, self.temp.barriers.as_ptr()) + }; } } @@ -419,13 +435,15 @@ impl crate::CommandEncoder for super::CommandEncoder { let mut offset = range.start; while offset < range.end { let size = super::ZERO_BUFFER_SIZE.min(range.end - offset); - list.CopyBufferRegion( - buffer.resource.as_mut_ptr(), - offset, - self.shared.zero_buffer.as_mut_ptr(), - 0, - size, - ); + unsafe { + list.CopyBufferRegion( + buffer.resource.as_mut_ptr(), + offset, + self.shared.zero_buffer.as_mut_ptr(), + 0, + size, + ) + }; offset += size; } } @@ -440,13 +458,15 @@ impl crate::CommandEncoder for super::CommandEncoder { { let list = self.list.unwrap(); for r in regions { - list.CopyBufferRegion( - dst.resource.as_mut_ptr(), - r.dst_offset, - src.resource.as_mut_ptr(), - r.src_offset, - r.size.get(), - ); + unsafe { + list.CopyBufferRegion( + dst.resource.as_mut_ptr(), + r.dst_offset, + src.resource.as_mut_ptr(), + r.src_offset, + r.size.get(), + ) + }; } } @@ -463,27 +483,33 @@ impl crate::CommandEncoder for super::CommandEncoder { let mut src_location = d3d12::D3D12_TEXTURE_COPY_LOCATION { 
pResource: src.resource.as_mut_ptr(), Type: d3d12::D3D12_TEXTURE_COPY_TYPE_SUBRESOURCE_INDEX, - u: mem::zeroed(), + u: unsafe { mem::zeroed() }, }; let mut dst_location = d3d12::D3D12_TEXTURE_COPY_LOCATION { pResource: dst.resource.as_mut_ptr(), Type: d3d12::D3D12_TEXTURE_COPY_TYPE_SUBRESOURCE_INDEX, - u: mem::zeroed(), + u: unsafe { mem::zeroed() }, }; for r in regions { let src_box = make_box(&r.src_base.origin, &r.size); - *src_location.u.SubresourceIndex_mut() = src.calc_subresource_for_copy(&r.src_base); - *dst_location.u.SubresourceIndex_mut() = dst.calc_subresource_for_copy(&r.dst_base); - - list.CopyTextureRegion( - &dst_location, - r.dst_base.origin.x, - r.dst_base.origin.y, - r.dst_base.origin.z, - &src_location, - &src_box, - ); + unsafe { + *src_location.u.SubresourceIndex_mut() = src.calc_subresource_for_copy(&r.src_base) + }; + unsafe { + *dst_location.u.SubresourceIndex_mut() = dst.calc_subresource_for_copy(&r.dst_base) + }; + + unsafe { + list.CopyTextureRegion( + &dst_location, + r.dst_base.origin.x, + r.dst_base.origin.y, + r.dst_base.origin.z, + &src_location, + &src_box, + ) + }; } } @@ -499,25 +525,32 @@ impl crate::CommandEncoder for super::CommandEncoder { let mut src_location = d3d12::D3D12_TEXTURE_COPY_LOCATION { pResource: src.resource.as_mut_ptr(), Type: d3d12::D3D12_TEXTURE_COPY_TYPE_PLACED_FOOTPRINT, - u: mem::zeroed(), + u: unsafe { mem::zeroed() }, }; let mut dst_location = d3d12::D3D12_TEXTURE_COPY_LOCATION { pResource: dst.resource.as_mut_ptr(), Type: d3d12::D3D12_TEXTURE_COPY_TYPE_SUBRESOURCE_INDEX, - u: mem::zeroed(), + u: unsafe { mem::zeroed() }, }; for r in regions { let src_box = make_box(&wgt::Origin3d::ZERO, &r.size); - *src_location.u.PlacedFootprint_mut() = r.to_subresource_footprint(dst.format); - *dst_location.u.SubresourceIndex_mut() = dst.calc_subresource_for_copy(&r.texture_base); - list.CopyTextureRegion( - &dst_location, - r.texture_base.origin.x, - r.texture_base.origin.y, - r.texture_base.origin.z, - 
&src_location, - &src_box, - ); + unsafe { + *src_location.u.PlacedFootprint_mut() = r.to_subresource_footprint(dst.format) + }; + unsafe { + *dst_location.u.SubresourceIndex_mut() = + dst.calc_subresource_for_copy(&r.texture_base) + }; + unsafe { + list.CopyTextureRegion( + &dst_location, + r.texture_base.origin.x, + r.texture_base.origin.y, + r.texture_base.origin.z, + &src_location, + &src_box, + ) + }; } } @@ -534,37 +567,48 @@ impl crate::CommandEncoder for super::CommandEncoder { let mut src_location = d3d12::D3D12_TEXTURE_COPY_LOCATION { pResource: src.resource.as_mut_ptr(), Type: d3d12::D3D12_TEXTURE_COPY_TYPE_SUBRESOURCE_INDEX, - u: mem::zeroed(), + u: unsafe { mem::zeroed() }, }; let mut dst_location = d3d12::D3D12_TEXTURE_COPY_LOCATION { pResource: dst.resource.as_mut_ptr(), Type: d3d12::D3D12_TEXTURE_COPY_TYPE_PLACED_FOOTPRINT, - u: mem::zeroed(), + u: unsafe { mem::zeroed() }, }; for r in regions { let src_box = make_box(&r.texture_base.origin, &r.size); - *src_location.u.SubresourceIndex_mut() = src.calc_subresource_for_copy(&r.texture_base); - *dst_location.u.PlacedFootprint_mut() = r.to_subresource_footprint(src.format); - list.CopyTextureRegion(&dst_location, 0, 0, 0, &src_location, &src_box); + unsafe { + *src_location.u.SubresourceIndex_mut() = + src.calc_subresource_for_copy(&r.texture_base) + }; + unsafe { + *dst_location.u.PlacedFootprint_mut() = r.to_subresource_footprint(src.format) + }; + unsafe { list.CopyTextureRegion(&dst_location, 0, 0, 0, &src_location, &src_box) }; } } unsafe fn begin_query(&mut self, set: &super::QuerySet, index: u32) { - self.list - .unwrap() - .BeginQuery(set.raw.as_mut_ptr(), set.raw_ty, index); + unsafe { + self.list + .unwrap() + .BeginQuery(set.raw.as_mut_ptr(), set.raw_ty, index) + }; } unsafe fn end_query(&mut self, set: &super::QuerySet, index: u32) { - self.list - .unwrap() - .EndQuery(set.raw.as_mut_ptr(), set.raw_ty, index); + unsafe { + self.list + .unwrap() + .EndQuery(set.raw.as_mut_ptr(), set.raw_ty, 
index) + }; } unsafe fn write_timestamp(&mut self, set: &super::QuerySet, index: u32) { - self.list.unwrap().EndQuery( - set.raw.as_mut_ptr(), - d3d12::D3D12_QUERY_TYPE_TIMESTAMP, - index, - ); + unsafe { + self.list.unwrap().EndQuery( + set.raw.as_mut_ptr(), + d3d12::D3D12_QUERY_TYPE_TIMESTAMP, + index, + ) + }; } unsafe fn reset_queries(&mut self, _set: &super::QuerySet, _range: Range) { // nothing to do here @@ -577,20 +621,22 @@ impl crate::CommandEncoder for super::CommandEncoder { offset: wgt::BufferAddress, _stride: wgt::BufferSize, ) { - self.list.unwrap().ResolveQueryData( - set.raw.as_mut_ptr(), - set.raw_ty, - range.start, - range.end - range.start, - buffer.resource.as_mut_ptr(), - offset, - ); + unsafe { + self.list.unwrap().ResolveQueryData( + set.raw.as_mut_ptr(), + set.raw_ty, + range.start, + range.end - range.start, + buffer.resource.as_mut_ptr(), + offset, + ) + }; } // render unsafe fn begin_render_pass(&mut self, desc: &crate::RenderPassDescriptor) { - self.begin_pass(super::PassKind::Render, desc.label); + unsafe { self.begin_pass(super::PassKind::Render, desc.label) }; let mut color_views = [native::CpuDescriptor { ptr: 0 }; crate::MAX_COLOR_ATTACHMENTS]; for (rtv, cat) in color_views.iter_mut().zip(desc.color_attachments.iter()) { if let Some(cat) = cat.as_ref() { @@ -612,12 +658,14 @@ impl crate::CommandEncoder for super::CommandEncoder { }; let list = self.list.unwrap(); - list.OMSetRenderTargets( - desc.color_attachments.len() as u32, - color_views.as_ptr(), - 0, - ds_view, - ); + unsafe { + list.OMSetRenderTargets( + desc.color_attachments.len() as u32, + color_views.as_ptr(), + 0, + ds_view, + ) + }; self.pass.resolves.clear(); for (rtv, cat) in color_views.iter().zip(desc.color_attachments.iter()) { @@ -657,7 +705,7 @@ impl crate::CommandEncoder for super::CommandEncoder { if !ds_view.is_null() && !flags.is_empty() { list.clear_depth_stencil_view( - *ds_view, + unsafe { *ds_view }, flags, ds.clear_value.0, ds.clear_value.1 as u8, @@ 
-680,8 +728,8 @@ impl crate::CommandEncoder for super::CommandEncoder { right: desc.extent.width as i32, bottom: desc.extent.height as i32, }; - list.RSSetViewports(1, &raw_vp); - list.RSSetScissorRects(1, &raw_rect); + unsafe { list.RSSetViewports(1, &raw_vp) }; + unsafe { list.RSSetScissorRects(1, &raw_rect) }; } unsafe fn end_render_pass(&mut self) { @@ -695,54 +743,70 @@ impl crate::CommandEncoder for super::CommandEncoder { let mut barrier = d3d12::D3D12_RESOURCE_BARRIER { Type: d3d12::D3D12_RESOURCE_BARRIER_TYPE_TRANSITION, Flags: d3d12::D3D12_RESOURCE_BARRIER_FLAG_NONE, - u: mem::zeroed(), + u: unsafe { mem::zeroed() }, }; //Note: this assumes `D3D12_RESOURCE_STATE_RENDER_TARGET`. // If it's not the case, we can include the `TextureUses` in `PassResove`. - *barrier.u.Transition_mut() = d3d12::D3D12_RESOURCE_TRANSITION_BARRIER { - pResource: resolve.src.0.as_mut_ptr(), - Subresource: resolve.src.1, - StateBefore: d3d12::D3D12_RESOURCE_STATE_RENDER_TARGET, - StateAfter: d3d12::D3D12_RESOURCE_STATE_RESOLVE_SOURCE, + unsafe { + *barrier.u.Transition_mut() = d3d12::D3D12_RESOURCE_TRANSITION_BARRIER { + pResource: resolve.src.0.as_mut_ptr(), + Subresource: resolve.src.1, + StateBefore: d3d12::D3D12_RESOURCE_STATE_RENDER_TARGET, + StateAfter: d3d12::D3D12_RESOURCE_STATE_RESOLVE_SOURCE, + } }; self.temp.barriers.push(barrier); - *barrier.u.Transition_mut() = d3d12::D3D12_RESOURCE_TRANSITION_BARRIER { - pResource: resolve.dst.0.as_mut_ptr(), - Subresource: resolve.dst.1, - StateBefore: d3d12::D3D12_RESOURCE_STATE_RENDER_TARGET, - StateAfter: d3d12::D3D12_RESOURCE_STATE_RESOLVE_DEST, + unsafe { + *barrier.u.Transition_mut() = d3d12::D3D12_RESOURCE_TRANSITION_BARRIER { + pResource: resolve.dst.0.as_mut_ptr(), + Subresource: resolve.dst.1, + StateBefore: d3d12::D3D12_RESOURCE_STATE_RENDER_TARGET, + StateAfter: d3d12::D3D12_RESOURCE_STATE_RESOLVE_DEST, + } }; self.temp.barriers.push(barrier); } if !self.temp.barriers.is_empty() { 
profiling::scope!("ID3D12GraphicsCommandList::ResourceBarrier"); - list.ResourceBarrier(self.temp.barriers.len() as u32, self.temp.barriers.as_ptr()); + unsafe { + list.ResourceBarrier( + self.temp.barriers.len() as u32, + self.temp.barriers.as_ptr(), + ) + }; } for resolve in self.pass.resolves.iter() { profiling::scope!("ID3D12GraphicsCommandList::ResolveSubresource"); - list.ResolveSubresource( - resolve.dst.0.as_mut_ptr(), - resolve.dst.1, - resolve.src.0.as_mut_ptr(), - resolve.src.1, - resolve.format, - ); + unsafe { + list.ResolveSubresource( + resolve.dst.0.as_mut_ptr(), + resolve.dst.1, + resolve.src.0.as_mut_ptr(), + resolve.src.1, + resolve.format, + ) + }; } // Flip all the barriers to reverse, back into `COLOR_TARGET`. for barrier in self.temp.barriers.iter_mut() { - let transition = barrier.u.Transition_mut(); + let transition = unsafe { barrier.u.Transition_mut() }; mem::swap(&mut transition.StateBefore, &mut transition.StateAfter); } if !self.temp.barriers.is_empty() { profiling::scope!("ID3D12GraphicsCommandList::ResourceBarrier"); - list.ResourceBarrier(self.temp.barriers.len() as u32, self.temp.barriers.as_ptr()); + unsafe { + list.ResourceBarrier( + self.temp.barriers.len() as u32, + self.temp.barriers.as_ptr(), + ) + }; } } - self.end_pass(); + unsafe { self.end_pass() }; } unsafe fn set_bind_group( @@ -818,18 +882,22 @@ impl crate::CommandEncoder for super::CommandEncoder { unsafe fn insert_debug_marker(&mut self, label: &str) { let (wide_label, size) = self.temp.prepare_marker(label); - self.list - .unwrap() - .SetMarker(0, wide_label.as_ptr() as *const _, size); + unsafe { + self.list + .unwrap() + .SetMarker(0, wide_label.as_ptr() as *const _, size) + }; } unsafe fn begin_debug_marker(&mut self, group_label: &str) { let (wide_label, size) = self.temp.prepare_marker(group_label); - self.list - .unwrap() - .BeginEvent(0, wide_label.as_ptr() as *const _, size); + unsafe { + self.list + .unwrap() + .BeginEvent(0, wide_label.as_ptr() as *const 
_, size) + }; } unsafe fn end_debug_marker(&mut self) { - self.list.unwrap().EndEvent() + unsafe { self.list.unwrap().EndEvent() } } unsafe fn set_render_pipeline(&mut self, pipeline: &super::RenderPipeline) { @@ -842,7 +910,7 @@ impl crate::CommandEncoder for super::CommandEncoder { }; list.set_pipeline_state(pipeline.raw); - list.IASetPrimitiveTopology(pipeline.topology); + unsafe { list.IASetPrimitiveTopology(pipeline.topology) }; for (index, (vb, &stride)) in self .pass @@ -891,7 +959,7 @@ impl crate::CommandEncoder for super::CommandEncoder { MinDepth: depth_range.start, MaxDepth: depth_range.end, }; - self.list.unwrap().RSSetViewports(1, &raw_vp); + unsafe { self.list.unwrap().RSSetViewports(1, &raw_vp) }; } unsafe fn set_scissor_rect(&mut self, rect: &crate::Rect) { let raw_rect = d3d12::D3D12_RECT { @@ -900,7 +968,7 @@ impl crate::CommandEncoder for super::CommandEncoder { right: (rect.x + rect.w) as i32, bottom: (rect.y + rect.h) as i32, }; - self.list.unwrap().RSSetScissorRects(1, &raw_rect); + unsafe { self.list.unwrap().RSSetScissorRects(1, &raw_rect) }; } unsafe fn set_stencil_reference(&mut self, value: u32) { self.list.unwrap().set_stencil_reference(value); @@ -916,7 +984,7 @@ impl crate::CommandEncoder for super::CommandEncoder { start_instance: u32, instance_count: u32, ) { - self.prepare_draw(start_vertex as i32, start_instance); + unsafe { self.prepare_draw(start_vertex as i32, start_instance) }; self.list .unwrap() .draw(vertex_count, instance_count, start_vertex, start_instance); @@ -929,7 +997,7 @@ impl crate::CommandEncoder for super::CommandEncoder { start_instance: u32, instance_count: u32, ) { - self.prepare_draw(base_vertex, start_instance); + unsafe { self.prepare_draw(base_vertex, start_instance) }; self.list.unwrap().draw_indexed( index_count, instance_count, @@ -944,15 +1012,17 @@ impl crate::CommandEncoder for super::CommandEncoder { offset: wgt::BufferAddress, draw_count: u32, ) { - self.prepare_draw(0, 0); - 
self.list.unwrap().ExecuteIndirect( - self.shared.cmd_signatures.draw.as_mut_ptr(), - draw_count, - buffer.resource.as_mut_ptr(), - offset, - ptr::null_mut(), - 0, - ); + unsafe { self.prepare_draw(0, 0) }; + unsafe { + self.list.unwrap().ExecuteIndirect( + self.shared.cmd_signatures.draw.as_mut_ptr(), + draw_count, + buffer.resource.as_mut_ptr(), + offset, + ptr::null_mut(), + 0, + ) + }; } unsafe fn draw_indexed_indirect( &mut self, @@ -960,15 +1030,17 @@ impl crate::CommandEncoder for super::CommandEncoder { offset: wgt::BufferAddress, draw_count: u32, ) { - self.prepare_draw(0, 0); - self.list.unwrap().ExecuteIndirect( - self.shared.cmd_signatures.draw_indexed.as_mut_ptr(), - draw_count, - buffer.resource.as_mut_ptr(), - offset, - ptr::null_mut(), - 0, - ); + unsafe { self.prepare_draw(0, 0) }; + unsafe { + self.list.unwrap().ExecuteIndirect( + self.shared.cmd_signatures.draw_indexed.as_mut_ptr(), + draw_count, + buffer.resource.as_mut_ptr(), + offset, + ptr::null_mut(), + 0, + ) + }; } unsafe fn draw_indirect_count( &mut self, @@ -978,15 +1050,17 @@ impl crate::CommandEncoder for super::CommandEncoder { count_offset: wgt::BufferAddress, max_count: u32, ) { - self.prepare_draw(0, 0); - self.list.unwrap().ExecuteIndirect( - self.shared.cmd_signatures.draw.as_mut_ptr(), - max_count, - buffer.resource.as_mut_ptr(), - offset, - count_buffer.resource.as_mut_ptr(), - count_offset, - ); + unsafe { self.prepare_draw(0, 0) }; + unsafe { + self.list.unwrap().ExecuteIndirect( + self.shared.cmd_signatures.draw.as_mut_ptr(), + max_count, + buffer.resource.as_mut_ptr(), + offset, + count_buffer.resource.as_mut_ptr(), + count_offset, + ) + }; } unsafe fn draw_indexed_indirect_count( &mut self, @@ -996,24 +1070,26 @@ impl crate::CommandEncoder for super::CommandEncoder { count_offset: wgt::BufferAddress, max_count: u32, ) { - self.prepare_draw(0, 0); - self.list.unwrap().ExecuteIndirect( - self.shared.cmd_signatures.draw_indexed.as_mut_ptr(), - max_count, - 
buffer.resource.as_mut_ptr(), - offset, - count_buffer.resource.as_mut_ptr(), - count_offset, - ); + unsafe { self.prepare_draw(0, 0) }; + unsafe { + self.list.unwrap().ExecuteIndirect( + self.shared.cmd_signatures.draw_indexed.as_mut_ptr(), + max_count, + buffer.resource.as_mut_ptr(), + offset, + count_buffer.resource.as_mut_ptr(), + count_offset, + ) + }; } // compute unsafe fn begin_compute_pass(&mut self, desc: &crate::ComputePassDescriptor) { - self.begin_pass(super::PassKind::Compute, desc.label); + unsafe { self.begin_pass(super::PassKind::Compute, desc.label) }; } unsafe fn end_compute_pass(&mut self) { - self.end_pass(); + unsafe { self.end_pass() }; } unsafe fn set_compute_pipeline(&mut self, pipeline: &super::ComputePipeline) { @@ -1035,13 +1111,15 @@ impl crate::CommandEncoder for super::CommandEncoder { unsafe fn dispatch_indirect(&mut self, buffer: &super::Buffer, offset: wgt::BufferAddress) { self.prepare_dispatch([0; 3]); //TODO: update special constants indirectly - self.list.unwrap().ExecuteIndirect( - self.shared.cmd_signatures.dispatch.as_mut_ptr(), - 1, - buffer.resource.as_mut_ptr(), - offset, - ptr::null_mut(), - 0, - ); + unsafe { + self.list.unwrap().ExecuteIndirect( + self.shared.cmd_signatures.dispatch.as_mut_ptr(), + 1, + buffer.resource.as_mut_ptr(), + offset, + ptr::null_mut(), + 0, + ) + }; } } diff --git a/wgpu-hal/src/dx12/conv.rs b/wgpu-hal/src/dx12/conv.rs index 4114fba002..bc0f45677d 100644 --- a/wgpu-hal/src/dx12/conv.rs +++ b/wgpu-hal/src/dx12/conv.rs @@ -1,5 +1,8 @@ use std::iter; -use winapi::um::{d3d12, d3dcommon}; +use winapi::{ + shared::minwindef::BOOL, + um::{d3d12, d3dcommon}, +}; pub fn map_buffer_usage_to_resource_flags(usage: crate::BufferUses) -> d3d12::D3D12_RESOURCE_FLAGS { let mut flags = 0; @@ -329,14 +332,14 @@ fn map_stencil_face(face: &wgt::StencilFaceState) -> d3d12::D3D12_DEPTH_STENCILO pub fn map_depth_stencil(ds: &wgt::DepthStencilState) -> d3d12::D3D12_DEPTH_STENCIL_DESC { d3d12::D3D12_DEPTH_STENCIL_DESC 
{ - DepthEnable: if ds.is_depth_enabled() { 1 } else { 0 }, + DepthEnable: BOOL::from(ds.is_depth_enabled()), DepthWriteMask: if ds.depth_write_enabled { d3d12::D3D12_DEPTH_WRITE_MASK_ALL } else { d3d12::D3D12_DEPTH_WRITE_MASK_ZERO }, DepthFunc: map_comparison(ds.depth_compare), - StencilEnable: if ds.stencil.is_enabled() { 1 } else { 0 }, + StencilEnable: BOOL::from(ds.stencil.is_enabled()), StencilReadMask: ds.stencil.read_mask as u8, StencilWriteMask: ds.stencil.write_mask as u8, FrontFace: map_stencil_face(&ds.stencil.front), diff --git a/wgpu-hal/src/dx12/descriptor.rs b/wgpu-hal/src/dx12/descriptor.rs index 67c8eca4fe..46fdd3eecd 100644 --- a/wgpu-hal/src/dx12/descriptor.rs +++ b/wgpu-hal/src/dx12/descriptor.rs @@ -157,7 +157,7 @@ impl FixedSizeHeap { } unsafe fn destroy(&self) { - self.raw.destroy(); + unsafe { self.raw.destroy() }; } } @@ -225,7 +225,7 @@ impl CpuPool { pub(super) unsafe fn destroy(&self) { for heap in &self.heaps { - heap.destroy(); + unsafe { heap.destroy() }; } } } @@ -274,7 +274,7 @@ impl CpuHeap { } pub(super) unsafe fn destroy(self) { - self.inner.into_inner().raw.destroy(); + unsafe { self.inner.into_inner().raw.destroy() }; } } @@ -296,14 +296,16 @@ pub(super) unsafe fn upload( ) -> Result { let count = src.stage.len() as u32; let index = dst.allocate_slice(count as u64)?; - device.CopyDescriptors( - 1, - &dst.cpu_descriptor_at(index), - &count, - count, - src.stage.as_ptr(), - dummy_copy_counts.as_ptr(), - dst.ty as u32, - ); + unsafe { + device.CopyDescriptors( + 1, + &dst.cpu_descriptor_at(index), + &count, + count, + src.stage.as_ptr(), + dummy_copy_counts.as_ptr(), + dst.ty as u32, + ) + }; Ok(dst.at(index, count as u64)) } diff --git a/wgpu-hal/src/dx12/device.rs b/wgpu-hal/src/dx12/device.rs index 42bb343fa4..7e24815d97 100644 --- a/wgpu-hal/src/dx12/device.rs +++ b/wgpu-hal/src/dx12/device.rs @@ -7,7 +7,7 @@ use super::{conv, descriptor, view}; use parking_lot::Mutex; use std::{ffi, mem, num::NonZeroU32, ptr, slice, 
sync::Arc}; use winapi::{ - shared::{dxgiformat, dxgitype, winerror}, + shared::{dxgiformat, dxgitype, minwindef::BOOL, winerror}, um::{d3d12, d3dcompiler, synchapi, winbase}, Interface, }; @@ -182,7 +182,7 @@ impl super::Device { .fence .set_event_on_completion(self.idler.event, value); hr.into_device_result("Set event")?; - synchapi::WaitForSingleObject(self.idler.event.0, winbase::INFINITE); + unsafe { synchapi::WaitForSingleObject(self.idler.event.0, winbase::INFINITE) }; Ok(()) } @@ -319,13 +319,13 @@ impl super::Device { impl crate::Device for super::Device { unsafe fn exit(self, queue: super::Queue) { self.rtv_pool.lock().free_handle(self.null_rtv_handle); - self.rtv_pool.into_inner().destroy(); - self.dsv_pool.into_inner().destroy(); - self.srv_uav_pool.into_inner().destroy(); - self.sampler_pool.into_inner().destroy(); - self.shared.destroy(); - self.idler.destroy(); - queue.raw.destroy(); + unsafe { self.rtv_pool.into_inner().destroy() }; + unsafe { self.dsv_pool.into_inner().destroy() }; + unsafe { self.srv_uav_pool.into_inner().destroy() }; + unsafe { self.sampler_pool.into_inner().destroy() }; + unsafe { self.shared.destroy() }; + unsafe { self.idler.destroy() }; + unsafe { queue.raw.destroy() }; } unsafe fn create_buffer( @@ -377,30 +377,32 @@ impl crate::Device for super::Device { VisibleNodeMask: 0, }; - let hr = self.raw.CreateCommittedResource( - &heap_properties, - if self.private_caps.heap_create_not_zeroed { - D3D12_HEAP_FLAG_CREATE_NOT_ZEROED - } else { - d3d12::D3D12_HEAP_FLAG_NONE - }, - &raw_desc, - d3d12::D3D12_RESOURCE_STATE_COMMON, - ptr::null(), - &d3d12::ID3D12Resource::uuidof(), - resource.mut_void(), - ); + let hr = unsafe { + self.raw.CreateCommittedResource( + &heap_properties, + if self.private_caps.heap_create_not_zeroed { + D3D12_HEAP_FLAG_CREATE_NOT_ZEROED + } else { + d3d12::D3D12_HEAP_FLAG_NONE + }, + &raw_desc, + d3d12::D3D12_RESOURCE_STATE_COMMON, + ptr::null(), + &d3d12::ID3D12Resource::uuidof(), + resource.mut_void(), + ) 
+ }; hr.into_device_result("Buffer creation")?; if let Some(label) = desc.label { let cwstr = conv::map_label(label); - resource.SetName(cwstr.as_ptr()); + unsafe { resource.SetName(cwstr.as_ptr()) }; } Ok(super::Buffer { resource, size }) } unsafe fn destroy_buffer(&self, buffer: super::Buffer) { - buffer.resource.destroy(); + unsafe { buffer.resource.destroy() }; } unsafe fn map_buffer( &self, @@ -408,17 +410,17 @@ impl crate::Device for super::Device { range: crate::MemoryRange, ) -> Result { let mut ptr = ptr::null_mut(); - let hr = (*buffer.resource).Map(0, ptr::null(), &mut ptr); + let hr = unsafe { (*buffer.resource).Map(0, ptr::null(), &mut ptr) }; hr.into_device_result("Map buffer")?; Ok(crate::BufferMapping { - ptr: ptr::NonNull::new(ptr.offset(range.start as isize) as *mut _).unwrap(), + ptr: ptr::NonNull::new(unsafe { ptr.offset(range.start as isize) } as *mut _).unwrap(), //TODO: double-check this. Documentation is a bit misleading - // it implies that Map/Unmap is needed to invalidate/flush memory. 
is_coherent: true, }) } unsafe fn unmap_buffer(&self, buffer: &super::Buffer) -> Result<(), crate::DeviceError> { - (*buffer.resource).Unmap(0, ptr::null()); + unsafe { (*buffer.resource).Unmap(0, ptr::null()) }; Ok(()) } unsafe fn flush_mapped_ranges(&self, _buffer: &super::Buffer, _ranges: I) {} @@ -470,24 +472,26 @@ impl crate::Device for super::Device { VisibleNodeMask: 0, }; - let hr = self.raw.CreateCommittedResource( - &heap_properties, - if self.private_caps.heap_create_not_zeroed { - D3D12_HEAP_FLAG_CREATE_NOT_ZEROED - } else { - d3d12::D3D12_HEAP_FLAG_NONE - }, - &raw_desc, - d3d12::D3D12_RESOURCE_STATE_COMMON, - ptr::null(), // clear value - &d3d12::ID3D12Resource::uuidof(), - resource.mut_void(), - ); + let hr = unsafe { + self.raw.CreateCommittedResource( + &heap_properties, + if self.private_caps.heap_create_not_zeroed { + D3D12_HEAP_FLAG_CREATE_NOT_ZEROED + } else { + d3d12::D3D12_HEAP_FLAG_NONE + }, + &raw_desc, + d3d12::D3D12_RESOURCE_STATE_COMMON, + ptr::null(), // clear value + &d3d12::ID3D12Resource::uuidof(), + resource.mut_void(), + ) + }; hr.into_device_result("Texture creation")?; if let Some(label) = desc.label { let cwstr = conv::map_label(label); - resource.SetName(cwstr.as_ptr()); + unsafe { resource.SetName(cwstr.as_ptr()) }; } Ok(super::Texture { @@ -500,7 +504,7 @@ impl crate::Device for super::Device { }) } unsafe fn destroy_texture(&self, texture: super::Texture) { - texture.resource.destroy(); + unsafe { texture.resource.destroy() }; } unsafe fn create_texture_view( @@ -518,13 +522,15 @@ impl crate::Device for super::Device { texture.calc_subresource(desc.range.base_mip_level, desc.range.base_array_layer, 0), ), handle_srv: if desc.usage.intersects(crate::TextureUses::RESOURCE) { - let raw_desc = view_desc.to_srv(); + let raw_desc = unsafe { view_desc.to_srv() }; let handle = self.srv_uav_pool.lock().alloc_handle(); - self.raw.CreateShaderResourceView( - texture.resource.as_mut_ptr(), - &raw_desc, - handle.raw, - ); + unsafe { + 
self.raw.CreateShaderResourceView( + texture.resource.as_mut_ptr(), + &raw_desc, + handle.raw, + ) + }; Some(handle) } else { None @@ -532,26 +538,30 @@ impl crate::Device for super::Device { handle_uav: if desc.usage.intersects( crate::TextureUses::STORAGE_READ | crate::TextureUses::STORAGE_READ_WRITE, ) { - let raw_desc = view_desc.to_uav(); + let raw_desc = unsafe { view_desc.to_uav() }; let handle = self.srv_uav_pool.lock().alloc_handle(); - self.raw.CreateUnorderedAccessView( - texture.resource.as_mut_ptr(), - ptr::null_mut(), - &raw_desc, - handle.raw, - ); + unsafe { + self.raw.CreateUnorderedAccessView( + texture.resource.as_mut_ptr(), + ptr::null_mut(), + &raw_desc, + handle.raw, + ) + }; Some(handle) } else { None }, handle_rtv: if desc.usage.intersects(crate::TextureUses::COLOR_TARGET) { - let raw_desc = view_desc.to_rtv(); + let raw_desc = unsafe { view_desc.to_rtv() }; let handle = self.rtv_pool.lock().alloc_handle(); - self.raw.CreateRenderTargetView( - texture.resource.as_mut_ptr(), - &raw_desc, - handle.raw, - ); + unsafe { + self.raw.CreateRenderTargetView( + texture.resource.as_mut_ptr(), + &raw_desc, + handle.raw, + ) + }; Some(handle) } else { None @@ -560,13 +570,15 @@ impl crate::Device for super::Device { .usage .intersects(crate::TextureUses::DEPTH_STENCIL_READ) { - let raw_desc = view_desc.to_dsv(desc.format.into()); + let raw_desc = unsafe { view_desc.to_dsv(desc.format.into()) }; let handle = self.dsv_pool.lock().alloc_handle(); - self.raw.CreateDepthStencilView( - texture.resource.as_mut_ptr(), - &raw_desc, - handle.raw, - ); + unsafe { + self.raw.CreateDepthStencilView( + texture.resource.as_mut_ptr(), + &raw_desc, + handle.raw, + ) + }; Some(handle) } else { None @@ -575,13 +587,15 @@ impl crate::Device for super::Device { .usage .intersects(crate::TextureUses::DEPTH_STENCIL_WRITE) { - let raw_desc = view_desc.to_dsv(FormatAspects::empty()); + let raw_desc = unsafe { view_desc.to_dsv(FormatAspects::empty()) }; let handle = 
self.dsv_pool.lock().alloc_handle(); - self.raw.CreateDepthStencilView( - texture.resource.as_mut_ptr(), - &raw_desc, - handle.raw, - ); + unsafe { + self.raw.CreateDepthStencilView( + texture.resource.as_mut_ptr(), + &raw_desc, + handle.raw, + ) + }; Some(handle) } else { None @@ -664,7 +678,7 @@ impl crate::Device for super::Device { if let Some(label) = desc.label { let cwstr = conv::map_label(label); - allocator.SetName(cwstr.as_ptr()); + unsafe { allocator.SetName(cwstr.as_ptr()) }; } Ok(super::CommandEncoder { @@ -681,12 +695,12 @@ impl crate::Device for super::Device { unsafe fn destroy_command_encoder(&self, encoder: super::CommandEncoder) { if let Some(list) = encoder.list { list.close(); - list.destroy(); + unsafe { list.destroy() }; } for list in encoder.free_lists { - list.destroy(); + unsafe { list.destroy() }; } - encoder.allocator.destroy(); + unsafe { encoder.allocator.destroy() }; } unsafe fn create_bind_group_layout( @@ -737,10 +751,10 @@ impl crate::Device for super::Device { } unsafe fn destroy_bind_group_layout(&self, bg_layout: super::BindGroupLayout) { if let Some(cpu_heap) = bg_layout.cpu_heap_views { - cpu_heap.destroy(); + unsafe { cpu_heap.destroy() }; } if let Some(cpu_heap) = bg_layout.cpu_heap_samplers { - cpu_heap.destroy(); + unsafe { cpu_heap.destroy() }; } } @@ -1064,9 +1078,9 @@ impl crate::Device for super::Device { if !error.is_null() { log::error!( "Root signature serialization error: {:?}", - error.as_c_str().to_str().unwrap() + unsafe { error.as_c_str() }.to_str().unwrap() ); - error.destroy(); + unsafe { error.destroy() }; return Err(crate::DeviceError::Lost); } @@ -1074,13 +1088,13 @@ impl crate::Device for super::Device { .raw .create_root_signature(blob, 0) .into_device_result("Root signature creation")?; - blob.destroy(); + unsafe { blob.destroy() }; log::debug!("\traw = {:?}", raw); if let Some(label) = desc.label { let cwstr = conv::map_label(label); - raw.SetName(cwstr.as_ptr()); + unsafe { raw.SetName(cwstr.as_ptr()) 
}; } Ok(super::PipelineLayout { @@ -1101,7 +1115,7 @@ impl crate::Device for super::Device { }) } unsafe fn destroy_pipeline_layout(&self, pipeline_layout: super::PipelineLayout) { - pipeline_layout.shared.signature.destroy(); + unsafe { pipeline_layout.shared.signature.destroy() }; } unsafe fn create_bind_group( @@ -1155,7 +1169,7 @@ impl crate::Device for super::Device { BufferLocation: gpu_address, SizeInBytes: ((size - 1) | size_mask) + 1, }; - self.raw.CreateConstantBufferView(&raw_desc, handle); + unsafe { self.raw.CreateConstantBufferView(&raw_desc, handle) }; } wgt::BufferBindingType::Storage { read_only: true } => { let mut raw_desc = d3d12::D3D12_SHADER_RESOURCE_VIEW_DESC { @@ -1163,39 +1177,47 @@ impl crate::Device for super::Device { Shader4ComponentMapping: view::D3D12_DEFAULT_SHADER_4_COMPONENT_MAPPING, ViewDimension: d3d12::D3D12_SRV_DIMENSION_BUFFER, - u: mem::zeroed(), + u: unsafe { mem::zeroed() }, }; - *raw_desc.u.Buffer_mut() = d3d12::D3D12_BUFFER_SRV { - FirstElement: data.offset / 4, - NumElements: size / 4, - StructureByteStride: 0, - Flags: d3d12::D3D12_BUFFER_SRV_FLAG_RAW, + unsafe { + *raw_desc.u.Buffer_mut() = d3d12::D3D12_BUFFER_SRV { + FirstElement: data.offset / 4, + NumElements: size / 4, + StructureByteStride: 0, + Flags: d3d12::D3D12_BUFFER_SRV_FLAG_RAW, + } + }; + unsafe { + self.raw.CreateShaderResourceView( + data.buffer.resource.as_mut_ptr(), + &raw_desc, + handle, + ) }; - self.raw.CreateShaderResourceView( - data.buffer.resource.as_mut_ptr(), - &raw_desc, - handle, - ); } wgt::BufferBindingType::Storage { read_only: false } => { let mut raw_desc = d3d12::D3D12_UNORDERED_ACCESS_VIEW_DESC { Format: dxgiformat::DXGI_FORMAT_R32_TYPELESS, ViewDimension: d3d12::D3D12_UAV_DIMENSION_BUFFER, - u: mem::zeroed(), + u: unsafe { mem::zeroed() }, + }; + unsafe { + *raw_desc.u.Buffer_mut() = d3d12::D3D12_BUFFER_UAV { + FirstElement: data.offset / 4, + NumElements: size / 4, + StructureByteStride: 0, + CounterOffsetInBytes: 0, + Flags: 
d3d12::D3D12_BUFFER_UAV_FLAG_RAW, + } }; - *raw_desc.u.Buffer_mut() = d3d12::D3D12_BUFFER_UAV { - FirstElement: data.offset / 4, - NumElements: size / 4, - StructureByteStride: 0, - CounterOffsetInBytes: 0, - Flags: d3d12::D3D12_BUFFER_UAV_FLAG_RAW, + unsafe { + self.raw.CreateUnorderedAccessView( + data.buffer.resource.as_mut_ptr(), + ptr::null_mut(), + &raw_desc, + handle, + ) }; - self.raw.CreateUnorderedAccessView( - data.buffer.resource.as_mut_ptr(), - ptr::null_mut(), - &raw_desc, - handle, - ); } } inner.stage.push(handle); @@ -1229,24 +1251,28 @@ impl crate::Device for super::Device { let handle_views = match cpu_views { Some(inner) => { - let dual = descriptor::upload( - self.raw, - &*inner, - &self.shared.heap_views, - &desc.layout.copy_counts, - )?; + let dual = unsafe { + descriptor::upload( + self.raw, + &inner, + &self.shared.heap_views, + &desc.layout.copy_counts, + ) + }?; Some(dual) } None => None, }; let handle_samplers = match cpu_samplers { Some(inner) => { - let dual = descriptor::upload( - self.raw, - &*inner, - &self.shared.heap_samplers, - &desc.layout.copy_counts, - )?; + let dual = unsafe { + descriptor::upload( + self.raw, + &inner, + &self.shared.heap_samplers, + &desc.layout.copy_counts, + ) + }?; Some(dual) } None => None, @@ -1260,10 +1286,10 @@ impl crate::Device for super::Device { } unsafe fn destroy_bind_group(&self, group: super::BindGroup) { if let Some(dual) = group.handle_views { - let _ = self.shared.heap_views.free_slice(dual); + self.shared.heap_views.free_slice(dual); } if let Some(dual) = group.handle_samplers { - let _ = self.shared.heap_samplers.free_slice(dual); + self.shared.heap_samplers.free_slice(dual); } } @@ -1358,8 +1384,8 @@ impl crate::Device for super::Device { DepthBias: bias.constant, DepthBiasClamp: bias.clamp, SlopeScaledDepthBias: bias.slope_scale, - DepthClipEnable: if desc.primitive.unclipped_depth { 0 } else { 1 }, - MultisampleEnable: if desc.multisample.count > 1 { 1 } else { 0 }, + DepthClipEnable: 
BOOL::from(!desc.primitive.unclipped_depth), + MultisampleEnable: BOOL::from(desc.multisample.count > 1), ForcedSampleCount: 0, AntialiasedLineEnable: 0, ConservativeRaster: if desc.primitive.conservative { @@ -1388,11 +1414,7 @@ impl crate::Device for super::Device { RasterizedStream: 0, }, BlendState: d3d12::D3D12_BLEND_DESC { - AlphaToCoverageEnable: if desc.multisample.alpha_to_coverage_enabled { - 1 - } else { - 0 - }, + AlphaToCoverageEnable: BOOL::from(desc.multisample.alpha_to_coverage_enabled), IndependentBlendEnable: 1, RenderTarget: conv::map_render_targets(desc.color_targets), }, @@ -1400,7 +1422,7 @@ impl crate::Device for super::Device { RasterizerState: raw_rasterizer, DepthStencilState: match desc.depth_stencil { Some(ref ds) => conv::map_depth_stencil(ds), - None => mem::zeroed(), + None => unsafe { mem::zeroed() }, }, InputLayout: d3d12::D3D12_INPUT_LAYOUT_DESC { pInputElementDescs: if input_element_descs.is_empty() { @@ -1441,16 +1463,18 @@ impl crate::Device for super::Device { let mut raw = native::PipelineState::null(); let hr = { profiling::scope!("ID3D12Device::CreateGraphicsPipelineState"); - self.raw.CreateGraphicsPipelineState( - &raw_desc, - &d3d12::ID3D12PipelineState::uuidof(), - raw.mut_void(), - ) + unsafe { + self.raw.CreateGraphicsPipelineState( + &raw_desc, + &d3d12::ID3D12PipelineState::uuidof(), + raw.mut_void(), + ) + } }; - blob_vs.destroy(); + unsafe { blob_vs.destroy() }; if !blob_fs.is_null() { - blob_fs.destroy(); + unsafe { blob_fs.destroy() }; } hr.into_result() @@ -1458,7 +1482,7 @@ impl crate::Device for super::Device { if let Some(name) = desc.label { let cwstr = conv::map_label(name); - raw.SetName(cwstr.as_ptr()); + unsafe { raw.SetName(cwstr.as_ptr()) }; } Ok(super::RenderPipeline { @@ -1469,7 +1493,7 @@ impl crate::Device for super::Device { }) } unsafe fn destroy_render_pipeline(&self, pipeline: super::RenderPipeline) { - pipeline.raw.destroy(); + unsafe { pipeline.raw.destroy() }; } unsafe fn 
create_compute_pipeline( @@ -1489,7 +1513,7 @@ impl crate::Device for super::Device { ) }; - blob_cs.destroy(); + unsafe { blob_cs.destroy() }; let raw = pair.into_result().map_err(|err| { crate::PipelineError::Linkage(wgt::ShaderStages::COMPUTE, err.into_owned()) @@ -1497,7 +1521,7 @@ impl crate::Device for super::Device { if let Some(name) = desc.label { let cwstr = conv::map_label(name); - raw.SetName(cwstr.as_ptr()); + unsafe { raw.SetName(cwstr.as_ptr()) }; } Ok(super::ComputePipeline { @@ -1506,7 +1530,7 @@ impl crate::Device for super::Device { }) } unsafe fn destroy_compute_pipeline(&self, pipeline: super::ComputePipeline) { - pipeline.raw.destroy(); + unsafe { pipeline.raw.destroy() }; } unsafe fn create_query_set( @@ -1535,34 +1559,36 @@ impl crate::Device for super::Device { if let Some(label) = desc.label { let cwstr = conv::map_label(label); - raw.SetName(cwstr.as_ptr()); + unsafe { raw.SetName(cwstr.as_ptr()) }; } Ok(super::QuerySet { raw, raw_ty }) } unsafe fn destroy_query_set(&self, set: super::QuerySet) { - set.raw.destroy(); + unsafe { set.raw.destroy() }; } unsafe fn create_fence(&self) -> Result { let mut raw = native::Fence::null(); - let hr = self.raw.CreateFence( - 0, - d3d12::D3D12_FENCE_FLAG_NONE, - &d3d12::ID3D12Fence::uuidof(), - raw.mut_void(), - ); + let hr = unsafe { + self.raw.CreateFence( + 0, + d3d12::D3D12_FENCE_FLAG_NONE, + &d3d12::ID3D12Fence::uuidof(), + raw.mut_void(), + ) + }; hr.into_device_result("Fence creation")?; Ok(super::Fence { raw }) } unsafe fn destroy_fence(&self, fence: super::Fence) { - fence.raw.destroy(); + unsafe { fence.raw.destroy() }; } unsafe fn get_fence_value( &self, fence: &super::Fence, ) -> Result { - Ok(fence.raw.GetCompletedValue()) + Ok(unsafe { fence.raw.GetCompletedValue() }) } unsafe fn wait( &self, @@ -1570,13 +1596,13 @@ impl crate::Device for super::Device { value: crate::FenceValue, timeout_ms: u32, ) -> Result { - if fence.raw.GetCompletedValue() >= value { + if unsafe { 
fence.raw.GetCompletedValue() } >= value { return Ok(true); } let hr = fence.raw.set_event_on_completion(self.idler.event, value); hr.into_device_result("Set event")?; - match synchapi::WaitForSingleObject(self.idler.event.0, timeout_ms) { + match unsafe { synchapi::WaitForSingleObject(self.idler.event.0, timeout_ms) } { winbase::WAIT_ABANDONED | winbase::WAIT_FAILED => Err(crate::DeviceError::Lost), winbase::WAIT_OBJECT_0 => Ok(true), winerror::WAIT_TIMEOUT => Ok(false), @@ -1590,8 +1616,10 @@ impl crate::Device for super::Device { unsafe fn start_capture(&self) -> bool { #[cfg(feature = "renderdoc")] { - self.render_doc - .start_frame_capture(self.raw.as_mut_ptr() as *mut _, ptr::null_mut()) + unsafe { + self.render_doc + .start_frame_capture(self.raw.as_mut_ptr() as *mut _, ptr::null_mut()) + } } #[cfg(not(feature = "renderdoc"))] false @@ -1599,7 +1627,9 @@ impl crate::Device for super::Device { unsafe fn stop_capture(&self) { #[cfg(feature = "renderdoc")] - self.render_doc - .end_frame_capture(self.raw.as_mut_ptr() as *mut _, ptr::null_mut()) + unsafe { + self.render_doc + .end_frame_capture(self.raw.as_mut_ptr() as *mut _, ptr::null_mut()) + } } } diff --git a/wgpu-hal/src/dx12/instance.rs b/wgpu-hal/src/dx12/instance.rs index b300c39857..71f53e140e 100644 --- a/wgpu-hal/src/dx12/instance.rs +++ b/wgpu-hal/src/dx12/instance.rs @@ -21,7 +21,7 @@ impl crate::Instance for super::Instance { Ok(pair) => match pair.into_result() { Ok(debug_controller) => { debug_controller.enable_layer(); - debug_controller.Release(); + unsafe { debug_controller.Release() }; } Err(err) => { log::warn!("Unable to enable D3D12 debug interface: {}", err); @@ -43,11 +43,13 @@ impl crate::Instance for super::Instance { #[allow(trivial_casts)] if let Some(factory5) = factory.as_factory5() { let mut allow_tearing: minwindef::BOOL = minwindef::FALSE; - let hr = factory5.CheckFeatureSupport( - dxgi1_5::DXGI_FEATURE_PRESENT_ALLOW_TEARING, - &mut allow_tearing as *mut _ as *mut _, - 
mem::size_of::() as _, - ); + let hr = unsafe { + factory5.CheckFeatureSupport( + dxgi1_5::DXGI_FEATURE_PRESENT_ALLOW_TEARING, + &mut allow_tearing as *mut _ as *mut _, + mem::size_of::() as _, + ) + }; match hr.into_result() { Err(err) => log::warn!("Unable to check for tearing support: {}", err), diff --git a/wgpu-hal/src/dx12/mod.rs b/wgpu-hal/src/dx12/mod.rs index 6f9f18b6ca..6fdd26dba7 100644 --- a/wgpu-hal/src/dx12/mod.rs +++ b/wgpu-hal/src/dx12/mod.rs @@ -100,7 +100,7 @@ impl Instance { ) -> Surface { Surface { factory: self.factory, - target: SurfaceTarget::Visual(native::WeakPtr::from_raw(visual)), + target: SurfaceTarget::Visual(unsafe { native::WeakPtr::from_raw(visual) }), supports_allow_tearing: self.supports_allow_tearing, swap_chain: None, } @@ -183,7 +183,7 @@ struct Idler { impl Idler { unsafe fn destroy(self) { - self.fence.destroy(); + unsafe { self.fence.destroy() }; } } @@ -195,9 +195,11 @@ struct CommandSignatures { impl CommandSignatures { unsafe fn destroy(&self) { - self.draw.destroy(); - self.draw_indexed.destroy(); - self.dispatch.destroy(); + unsafe { + self.draw.destroy(); + self.draw_indexed.destroy(); + self.dispatch.destroy(); + } } } @@ -210,10 +212,12 @@ struct DeviceShared { impl DeviceShared { unsafe fn destroy(&self) { - self.zero_buffer.destroy(); - self.cmd_signatures.destroy(); - self.heap_views.raw.destroy(); - self.heap_samplers.raw.destroy(); + unsafe { + self.zero_buffer.destroy(); + self.cmd_signatures.destroy(); + self.heap_views.raw.destroy(); + self.heap_samplers.raw.destroy(); + } } } @@ -548,7 +552,7 @@ unsafe impl Sync for ComputePipeline {} impl SwapChain { unsafe fn release_resources(self) -> native::WeakPtr { for resource in self.resources { - resource.destroy(); + unsafe { resource.destroy() }; } self.raw } @@ -561,7 +565,7 @@ impl SwapChain { Some(duration) => duration.as_millis() as u32, None => winbase::INFINITE, }; - match synchapi::WaitForSingleObject(self.waitable, timeout_ms) { + match unsafe { 
synchapi::WaitForSingleObject(self.waitable, timeout_ms) } { winbase::WAIT_ABANDONED | winbase::WAIT_FAILED => Err(crate::SurfaceError::Lost), winbase::WAIT_OBJECT_0 => Ok(true), winerror::WAIT_TIMEOUT => Ok(false), @@ -593,16 +597,18 @@ impl crate::Surface for Surface { //Note: this path doesn't properly re-initialize all of the things Some(sc) => { // can't have image resources in flight used by GPU - let _ = device.wait_idle(); - - let raw = sc.release_resources(); - let result = raw.ResizeBuffers( - config.swap_chain_size, - config.extent.width, - config.extent.height, - non_srgb_format, - flags, - ); + let _ = unsafe { device.wait_idle() }; + + let raw = unsafe { sc.release_resources() }; + let result = unsafe { + raw.ResizeBuffers( + config.swap_chain_size, + config.extent.width, + config.extent.height, + non_srgb_format, + flags, + ) + }; if let Err(err) = result.into_result() { log::error!("ResizeBuffers failed: {}", err); return Err(crate::SurfaceError::Other("window is in use")); @@ -664,7 +670,8 @@ impl crate::Surface for Surface { match self.target { SurfaceTarget::WndHandle(_) => {} SurfaceTarget::Visual(visual) => { - if let Err(err) = visual.SetContent(swap_chain1.as_unknown()).into_result() + if let Err(err) = + unsafe { visual.SetContent(swap_chain1.as_unknown()) }.into_result() { log::error!("Unable to SetContent: {}", err); return Err(crate::SurfaceError::Other( @@ -674,9 +681,9 @@ impl crate::Surface for Surface { } } - match swap_chain1.cast::().into_result() { + match unsafe { swap_chain1.cast::() }.into_result() { Ok(swap_chain3) => { - swap_chain1.destroy(); + unsafe { swap_chain1.destroy() }; swap_chain3 } Err(err) => { @@ -692,20 +699,24 @@ impl crate::Surface for Surface { // Disable automatic Alt+Enter handling by DXGI. 
const DXGI_MWA_NO_WINDOW_CHANGES: u32 = 1; const DXGI_MWA_NO_ALT_ENTER: u32 = 2; - self.factory.MakeWindowAssociation( - wnd_handle, - DXGI_MWA_NO_WINDOW_CHANGES | DXGI_MWA_NO_ALT_ENTER, - ); + unsafe { + self.factory.MakeWindowAssociation( + wnd_handle, + DXGI_MWA_NO_WINDOW_CHANGES | DXGI_MWA_NO_ALT_ENTER, + ) + }; } SurfaceTarget::Visual(_) => {} } - swap_chain.SetMaximumFrameLatency(config.swap_chain_size); - let waitable = swap_chain.GetFrameLatencyWaitableObject(); + unsafe { swap_chain.SetMaximumFrameLatency(config.swap_chain_size) }; + let waitable = unsafe { swap_chain.GetFrameLatencyWaitableObject() }; let mut resources = vec![native::Resource::null(); config.swap_chain_size as usize]; for (i, res) in resources.iter_mut().enumerate() { - swap_chain.GetBuffer(i as _, &d3d12::ID3D12Resource::uuidof(), res.mut_void()); + unsafe { + swap_chain.GetBuffer(i as _, &d3d12::ID3D12Resource::uuidof(), res.mut_void()) + }; } self.swap_chain = Some(SwapChain { @@ -723,12 +734,14 @@ impl crate::Surface for Surface { unsafe fn unconfigure(&mut self, device: &Device) { if let Some(mut sc) = self.swap_chain.take() { - let _ = sc.wait(None); - //TODO: this shouldn't be needed, - // but it complains that the queue is still used otherwise - let _ = device.wait_idle(); - let raw = sc.release_resources(); - raw.destroy(); + unsafe { + let _ = sc.wait(None); + //TODO: this shouldn't be needed, + // but it complains that the queue is still used otherwise + let _ = device.wait_idle(); + let raw = sc.release_resources(); + raw.destroy(); + } } } @@ -738,9 +751,9 @@ impl crate::Surface for Surface { ) -> Result>, crate::SurfaceError> { let sc = self.swap_chain.as_mut().unwrap(); - sc.wait(timeout)?; + unsafe { sc.wait(timeout) }?; - let base_index = sc.raw.GetCurrentBackBufferIndex() as usize; + let base_index = unsafe { sc.raw.GetCurrentBackBufferIndex() } as usize; let index = (base_index + sc.acquired_count) % sc.resources.len(); sc.acquired_count += 1; @@ -803,14 +816,14 @@ impl 
crate::Queue for Queue { }; profiling::scope!("IDXGISwapchain3::Present"); - sc.raw.Present(interval, flags); + unsafe { sc.raw.Present(interval, flags) }; Ok(()) } unsafe fn get_timestamp_period(&self) -> f32 { let mut frequency = 0u64; - self.raw.GetTimestampFrequency(&mut frequency); + unsafe { self.raw.GetTimestampFrequency(&mut frequency) }; (1_000_000_000.0 / frequency as f64) as f32 } } diff --git a/wgpu-hal/src/dx12/view.rs b/wgpu-hal/src/dx12/view.rs index 81e187aaf1..39d9707312 100644 --- a/wgpu-hal/src/dx12/view.rs +++ b/wgpu-hal/src/dx12/view.rs @@ -42,16 +42,18 @@ impl ViewDescriptor { Format: self.format_nodepth, ViewDimension: 0, Shader4ComponentMapping: D3D12_DEFAULT_SHADER_4_COMPONENT_MAPPING, - u: mem::zeroed(), + u: unsafe { mem::zeroed() }, }; match self.dimension { wgt::TextureViewDimension::D1 => { desc.ViewDimension = d3d12::D3D12_SRV_DIMENSION_TEXTURE1D; - *desc.u.Texture1D_mut() = d3d12::D3D12_TEX1D_SRV { - MostDetailedMip: self.mip_level_base, - MipLevels: self.mip_level_count, - ResourceMinLODClamp: 0.0, + unsafe { + *desc.u.Texture1D_mut() = d3d12::D3D12_TEX1D_SRV { + MostDetailedMip: self.mip_level_base, + MipLevels: self.mip_level_count, + ResourceMinLODClamp: 0.0, + } } } /* @@ -67,67 +69,81 @@ impl ViewDescriptor { }*/ wgt::TextureViewDimension::D2 if self.multisampled && self.array_layer_base == 0 => { desc.ViewDimension = d3d12::D3D12_SRV_DIMENSION_TEXTURE2DMS; - *desc.u.Texture2DMS_mut() = d3d12::D3D12_TEX2DMS_SRV { - UnusedField_NothingToDefine: 0, + unsafe { + *desc.u.Texture2DMS_mut() = d3d12::D3D12_TEX2DMS_SRV { + UnusedField_NothingToDefine: 0, + } } } wgt::TextureViewDimension::D2 if self.array_layer_base == 0 => { desc.ViewDimension = d3d12::D3D12_SRV_DIMENSION_TEXTURE2D; - *desc.u.Texture2D_mut() = d3d12::D3D12_TEX2D_SRV { - MostDetailedMip: self.mip_level_base, - MipLevels: self.mip_level_count, - PlaneSlice: 0, - ResourceMinLODClamp: 0.0, + unsafe { + *desc.u.Texture2D_mut() = d3d12::D3D12_TEX2D_SRV { + MostDetailedMip: 
self.mip_level_base, + MipLevels: self.mip_level_count, + PlaneSlice: 0, + ResourceMinLODClamp: 0.0, + } } } wgt::TextureViewDimension::D2 | wgt::TextureViewDimension::D2Array if self.multisampled => { desc.ViewDimension = d3d12::D3D12_SRV_DIMENSION_TEXTURE2DMSARRAY; - *desc.u.Texture2DMSArray_mut() = d3d12::D3D12_TEX2DMS_ARRAY_SRV { - FirstArraySlice: self.array_layer_base, - ArraySize: self.array_layer_count, + unsafe { + *desc.u.Texture2DMSArray_mut() = d3d12::D3D12_TEX2DMS_ARRAY_SRV { + FirstArraySlice: self.array_layer_base, + ArraySize: self.array_layer_count, + } } } wgt::TextureViewDimension::D2 | wgt::TextureViewDimension::D2Array => { desc.ViewDimension = d3d12::D3D12_SRV_DIMENSION_TEXTURE2DARRAY; - *desc.u.Texture2DArray_mut() = d3d12::D3D12_TEX2D_ARRAY_SRV { - MostDetailedMip: self.mip_level_base, - MipLevels: self.mip_level_count, - FirstArraySlice: self.array_layer_base, - ArraySize: self.array_layer_count, - PlaneSlice: 0, - ResourceMinLODClamp: 0.0, + unsafe { + *desc.u.Texture2DArray_mut() = d3d12::D3D12_TEX2D_ARRAY_SRV { + MostDetailedMip: self.mip_level_base, + MipLevels: self.mip_level_count, + FirstArraySlice: self.array_layer_base, + ArraySize: self.array_layer_count, + PlaneSlice: 0, + ResourceMinLODClamp: 0.0, + } } } wgt::TextureViewDimension::D3 => { desc.ViewDimension = d3d12::D3D12_SRV_DIMENSION_TEXTURE3D; - *desc.u.Texture3D_mut() = d3d12::D3D12_TEX3D_SRV { - MostDetailedMip: self.mip_level_base, - MipLevels: self.mip_level_count, - ResourceMinLODClamp: 0.0, + unsafe { + *desc.u.Texture3D_mut() = d3d12::D3D12_TEX3D_SRV { + MostDetailedMip: self.mip_level_base, + MipLevels: self.mip_level_count, + ResourceMinLODClamp: 0.0, + } } } wgt::TextureViewDimension::Cube if self.array_layer_base == 0 => { desc.ViewDimension = d3d12::D3D12_SRV_DIMENSION_TEXTURECUBE; - *desc.u.TextureCube_mut() = d3d12::D3D12_TEXCUBE_SRV { - MostDetailedMip: self.mip_level_base, - MipLevels: self.mip_level_count, - ResourceMinLODClamp: 0.0, + unsafe { + 
*desc.u.TextureCube_mut() = d3d12::D3D12_TEXCUBE_SRV { + MostDetailedMip: self.mip_level_base, + MipLevels: self.mip_level_count, + ResourceMinLODClamp: 0.0, + } } } wgt::TextureViewDimension::Cube | wgt::TextureViewDimension::CubeArray => { desc.ViewDimension = d3d12::D3D12_SRV_DIMENSION_TEXTURECUBEARRAY; - *desc.u.TextureCubeArray_mut() = d3d12::D3D12_TEXCUBE_ARRAY_SRV { - MostDetailedMip: self.mip_level_base, - MipLevels: self.mip_level_count, - First2DArrayFace: self.array_layer_base, - NumCubes: if self.array_layer_count == !0 { - !0 - } else { - self.array_layer_count / 6 - }, - ResourceMinLODClamp: 0.0, + unsafe { + *desc.u.TextureCubeArray_mut() = d3d12::D3D12_TEXCUBE_ARRAY_SRV { + MostDetailedMip: self.mip_level_base, + MipLevels: self.mip_level_count, + First2DArrayFace: self.array_layer_base, + NumCubes: if self.array_layer_count == !0 { + !0 + } else { + self.array_layer_count / 6 + }, + ResourceMinLODClamp: 0.0, + } } } } @@ -139,14 +155,16 @@ impl ViewDescriptor { let mut desc = d3d12::D3D12_UNORDERED_ACCESS_VIEW_DESC { Format: self.format_nodepth, ViewDimension: 0, - u: mem::zeroed(), + u: unsafe { mem::zeroed() }, }; match self.dimension { wgt::TextureViewDimension::D1 => { desc.ViewDimension = d3d12::D3D12_UAV_DIMENSION_TEXTURE1D; - *desc.u.Texture1D_mut() = d3d12::D3D12_TEX1D_UAV { - MipSlice: self.mip_level_base, + unsafe { + *desc.u.Texture1D_mut() = d3d12::D3D12_TEX1D_UAV { + MipSlice: self.mip_level_base, + } } } /* @@ -160,26 +178,32 @@ impl ViewDescriptor { }*/ wgt::TextureViewDimension::D2 if self.array_layer_base == 0 => { desc.ViewDimension = d3d12::D3D12_UAV_DIMENSION_TEXTURE2D; - *desc.u.Texture2D_mut() = d3d12::D3D12_TEX2D_UAV { - MipSlice: self.mip_level_base, - PlaneSlice: 0, + unsafe { + *desc.u.Texture2D_mut() = d3d12::D3D12_TEX2D_UAV { + MipSlice: self.mip_level_base, + PlaneSlice: 0, + } } } wgt::TextureViewDimension::D2 | wgt::TextureViewDimension::D2Array => { desc.ViewDimension = d3d12::D3D12_UAV_DIMENSION_TEXTURE2DARRAY; - 
*desc.u.Texture2DArray_mut() = d3d12::D3D12_TEX2D_ARRAY_UAV { - MipSlice: self.mip_level_base, - FirstArraySlice: self.array_layer_base, - ArraySize: self.array_layer_count, - PlaneSlice: 0, + unsafe { + *desc.u.Texture2DArray_mut() = d3d12::D3D12_TEX2D_ARRAY_UAV { + MipSlice: self.mip_level_base, + FirstArraySlice: self.array_layer_base, + ArraySize: self.array_layer_count, + PlaneSlice: 0, + } } } wgt::TextureViewDimension::D3 => { desc.ViewDimension = d3d12::D3D12_UAV_DIMENSION_TEXTURE3D; - *desc.u.Texture3D_mut() = d3d12::D3D12_TEX3D_UAV { - MipSlice: self.mip_level_base, - FirstWSlice: self.array_layer_base, - WSize: self.array_layer_count, + unsafe { + *desc.u.Texture3D_mut() = d3d12::D3D12_TEX3D_UAV { + MipSlice: self.mip_level_base, + FirstWSlice: self.array_layer_base, + WSize: self.array_layer_count, + } } } wgt::TextureViewDimension::Cube | wgt::TextureViewDimension::CubeArray => { @@ -194,14 +218,16 @@ impl ViewDescriptor { let mut desc = d3d12::D3D12_RENDER_TARGET_VIEW_DESC { Format: self.format, ViewDimension: 0, - u: mem::zeroed(), + u: unsafe { mem::zeroed() }, }; match self.dimension { wgt::TextureViewDimension::D1 => { desc.ViewDimension = d3d12::D3D12_RTV_DIMENSION_TEXTURE1D; - *desc.u.Texture1D_mut() = d3d12::D3D12_TEX1D_RTV { - MipSlice: self.mip_level_base, + unsafe { + *desc.u.Texture1D_mut() = d3d12::D3D12_TEX1D_RTV { + MipSlice: self.mip_level_base, + } } } /* @@ -215,41 +241,51 @@ impl ViewDescriptor { }*/ wgt::TextureViewDimension::D2 if self.multisampled && self.array_layer_base == 0 => { desc.ViewDimension = d3d12::D3D12_RTV_DIMENSION_TEXTURE2DMS; - *desc.u.Texture2DMS_mut() = d3d12::D3D12_TEX2DMS_RTV { - UnusedField_NothingToDefine: 0, + unsafe { + *desc.u.Texture2DMS_mut() = d3d12::D3D12_TEX2DMS_RTV { + UnusedField_NothingToDefine: 0, + } } } wgt::TextureViewDimension::D2 if self.array_layer_base == 0 => { desc.ViewDimension = d3d12::D3D12_RTV_DIMENSION_TEXTURE2D; - *desc.u.Texture2D_mut() = d3d12::D3D12_TEX2D_RTV { - MipSlice: 
self.mip_level_base, - PlaneSlice: 0, + unsafe { + *desc.u.Texture2D_mut() = d3d12::D3D12_TEX2D_RTV { + MipSlice: self.mip_level_base, + PlaneSlice: 0, + } } } wgt::TextureViewDimension::D2 | wgt::TextureViewDimension::D2Array if self.multisampled => { desc.ViewDimension = d3d12::D3D12_RTV_DIMENSION_TEXTURE2DMSARRAY; - *desc.u.Texture2DMSArray_mut() = d3d12::D3D12_TEX2DMS_ARRAY_RTV { - FirstArraySlice: self.array_layer_base, - ArraySize: self.array_layer_count, + unsafe { + *desc.u.Texture2DMSArray_mut() = d3d12::D3D12_TEX2DMS_ARRAY_RTV { + FirstArraySlice: self.array_layer_base, + ArraySize: self.array_layer_count, + } } } wgt::TextureViewDimension::D2 | wgt::TextureViewDimension::D2Array => { desc.ViewDimension = d3d12::D3D12_RTV_DIMENSION_TEXTURE2DARRAY; - *desc.u.Texture2DArray_mut() = d3d12::D3D12_TEX2D_ARRAY_RTV { - MipSlice: self.mip_level_base, - FirstArraySlice: self.array_layer_base, - ArraySize: self.array_layer_count, - PlaneSlice: 0, + unsafe { + *desc.u.Texture2DArray_mut() = d3d12::D3D12_TEX2D_ARRAY_RTV { + MipSlice: self.mip_level_base, + FirstArraySlice: self.array_layer_base, + ArraySize: self.array_layer_count, + PlaneSlice: 0, + } } } wgt::TextureViewDimension::D3 => { desc.ViewDimension = d3d12::D3D12_RTV_DIMENSION_TEXTURE3D; - *desc.u.Texture3D_mut() = d3d12::D3D12_TEX3D_RTV { - MipSlice: self.mip_level_base, - FirstWSlice: self.array_layer_base, - WSize: self.array_layer_count, + unsafe { + *desc.u.Texture3D_mut() = d3d12::D3D12_TEX3D_RTV { + MipSlice: self.mip_level_base, + FirstWSlice: self.array_layer_base, + WSize: self.array_layer_count, + } } } wgt::TextureViewDimension::Cube | wgt::TextureViewDimension::CubeArray => { @@ -277,14 +313,16 @@ impl ViewDescriptor { } flags }, - u: mem::zeroed(), + u: unsafe { mem::zeroed() }, }; match self.dimension { wgt::TextureViewDimension::D1 => { desc.ViewDimension = d3d12::D3D12_DSV_DIMENSION_TEXTURE1D; - *desc.u.Texture1D_mut() = d3d12::D3D12_TEX1D_DSV { - MipSlice: self.mip_level_base, + unsafe { 
+ *desc.u.Texture1D_mut() = d3d12::D3D12_TEX1D_DSV { + MipSlice: self.mip_level_base, + } } } /* @@ -298,31 +336,39 @@ impl ViewDescriptor { }*/ wgt::TextureViewDimension::D2 if self.multisampled && self.array_layer_base == 0 => { desc.ViewDimension = d3d12::D3D12_DSV_DIMENSION_TEXTURE2DMS; - *desc.u.Texture2DMS_mut() = d3d12::D3D12_TEX2DMS_DSV { - UnusedField_NothingToDefine: 0, + unsafe { + *desc.u.Texture2DMS_mut() = d3d12::D3D12_TEX2DMS_DSV { + UnusedField_NothingToDefine: 0, + } } } wgt::TextureViewDimension::D2 if self.array_layer_base == 0 => { desc.ViewDimension = d3d12::D3D12_DSV_DIMENSION_TEXTURE2D; - *desc.u.Texture2D_mut() = d3d12::D3D12_TEX2D_DSV { - MipSlice: self.mip_level_base, + unsafe { + *desc.u.Texture2D_mut() = d3d12::D3D12_TEX2D_DSV { + MipSlice: self.mip_level_base, + } } } wgt::TextureViewDimension::D2 | wgt::TextureViewDimension::D2Array if self.multisampled => { desc.ViewDimension = d3d12::D3D12_DSV_DIMENSION_TEXTURE2DMSARRAY; - *desc.u.Texture2DMSArray_mut() = d3d12::D3D12_TEX2DMS_ARRAY_DSV { - FirstArraySlice: self.array_layer_base, - ArraySize: self.array_layer_count, + unsafe { + *desc.u.Texture2DMSArray_mut() = d3d12::D3D12_TEX2DMS_ARRAY_DSV { + FirstArraySlice: self.array_layer_base, + ArraySize: self.array_layer_count, + } } } wgt::TextureViewDimension::D2 | wgt::TextureViewDimension::D2Array => { desc.ViewDimension = d3d12::D3D12_DSV_DIMENSION_TEXTURE2DARRAY; - *desc.u.Texture2DArray_mut() = d3d12::D3D12_TEX2D_ARRAY_DSV { - MipSlice: self.mip_level_base, - FirstArraySlice: self.array_layer_base, - ArraySize: self.array_layer_count, + unsafe { + *desc.u.Texture2DArray_mut() = d3d12::D3D12_TEX2D_ARRAY_DSV { + MipSlice: self.mip_level_base, + FirstArraySlice: self.array_layer_base, + ArraySize: self.array_layer_count, + } } } wgt::TextureViewDimension::D3 diff --git a/wgpu-hal/src/empty.rs b/wgpu-hal/src/empty.rs index b13ceb9489..a761ef7fb1 100644 --- a/wgpu-hal/src/empty.rs +++ b/wgpu-hal/src/empty.rs @@ -89,6 +89,7 @@ impl 
crate::Adapter for Context { ) -> crate::TextureFormatCapabilities { crate::TextureFormatCapabilities::empty() } + unsafe fn surface_capabilities(&self, surface: &Context) -> Option { None } diff --git a/wgpu-hal/src/gles/adapter.rs b/wgpu-hal/src/gles/adapter.rs index 8e2eda06f9..399655febb 100644 --- a/wgpu-hal/src/gles/adapter.rs +++ b/wgpu-hal/src/gles/adapter.rs @@ -191,13 +191,12 @@ impl super::Adapter { (glow::VENDOR, glow::RENDERER) }; let (vendor, renderer) = { - let vendor = gl.get_parameter_string(vendor_const); - let renderer = gl.get_parameter_string(renderer_const); + let vendor = unsafe { gl.get_parameter_string(vendor_const) }; + let renderer = unsafe { gl.get_parameter_string(renderer_const) }; (vendor, renderer) }; - let version = gl.get_parameter_string(glow::VERSION); - + let version = unsafe { gl.get_parameter_string(glow::VERSION) }; log::info!("Vendor: {}", vendor); log::info!("Renderer: {}", renderer); log::info!("Version: {}", version); @@ -218,7 +217,7 @@ impl super::Adapter { let supports_work_group_params = ver >= (3, 1); let shading_language_version = { - let sl_version = gl.get_parameter_string(glow::SHADING_LANGUAGE_VERSION); + let sl_version = unsafe { gl.get_parameter_string(glow::SHADING_LANGUAGE_VERSION) }; log::info!("SL version: {}", &sl_version); let (sl_major, sl_minor) = Self::parse_version(&sl_version).ok()?; let value = sl_major as u16 * 100 + sl_minor as u16 * 10; @@ -232,27 +231,27 @@ impl super::Adapter { let is_angle = renderer.contains("ANGLE"); let vertex_shader_storage_blocks = if supports_storage { - gl.get_parameter_i32(glow::MAX_VERTEX_SHADER_STORAGE_BLOCKS) as u32 + (unsafe { gl.get_parameter_i32(glow::MAX_VERTEX_SHADER_STORAGE_BLOCKS) } as u32) } else { 0 }; let fragment_shader_storage_blocks = if supports_storage { - gl.get_parameter_i32(glow::MAX_FRAGMENT_SHADER_STORAGE_BLOCKS) as u32 + (unsafe { gl.get_parameter_i32(glow::MAX_FRAGMENT_SHADER_STORAGE_BLOCKS) } as u32) } else { 0 }; let 
vertex_shader_storage_textures = if supports_storage { - gl.get_parameter_i32(glow::MAX_VERTEX_IMAGE_UNIFORMS) as u32 + (unsafe { gl.get_parameter_i32(glow::MAX_VERTEX_IMAGE_UNIFORMS) } as u32) } else { 0 }; let fragment_shader_storage_textures = if supports_storage { - gl.get_parameter_i32(glow::MAX_FRAGMENT_IMAGE_UNIFORMS) as u32 + (unsafe { gl.get_parameter_i32(glow::MAX_FRAGMENT_IMAGE_UNIFORMS) } as u32) } else { 0 }; let max_storage_block_size = if supports_storage { - gl.get_parameter_i32(glow::MAX_SHADER_STORAGE_BLOCK_SIZE) as u32 + (unsafe { gl.get_parameter_i32(glow::MAX_SHADER_STORAGE_BLOCK_SIZE) } as u32) } else { 0 }; @@ -312,6 +311,11 @@ impl super::Adapter { wgt::DownlevelFlags::BUFFER_BINDINGS_NOT_16_BYTE_ALIGNED, !(cfg!(target_arch = "wasm32") || is_angle), ); + // see https://registry.khronos.org/webgl/specs/latest/2.0/#BUFFER_OBJECT_BINDING + downlevel_flags.set( + wgt::DownlevelFlags::UNRESTRICTED_INDEX_BUFFER, + !cfg!(target_arch = "wasm32"), + ); let mut features = wgt::Features::empty() | wgt::Features::TEXTURE_ADAPTER_SPECIFIC_FORMAT_FEATURES @@ -330,6 +334,10 @@ impl super::Adapter { downlevel_flags.contains(wgt::DownlevelFlags::VERTEX_STORAGE) && vertex_shader_storage_textures != 0, ); + features.set( + wgt::Features::MULTIVIEW, + extensions.contains("OVR_multiview2"), + ); let gles_bcn_exts = [ "GL_EXT_texture_compression_s3tc_srgb", "GL_EXT_texture_compression_rgtc", @@ -414,24 +422,25 @@ impl super::Adapter { color_buffer_float, ); - let max_texture_size = gl.get_parameter_i32(glow::MAX_TEXTURE_SIZE) as u32; - let max_texture_3d_size = gl.get_parameter_i32(glow::MAX_3D_TEXTURE_SIZE) as u32; + let max_texture_size = unsafe { gl.get_parameter_i32(glow::MAX_TEXTURE_SIZE) } as u32; + let max_texture_3d_size = unsafe { gl.get_parameter_i32(glow::MAX_3D_TEXTURE_SIZE) } as u32; let min_uniform_buffer_offset_alignment = - gl.get_parameter_i32(glow::UNIFORM_BUFFER_OFFSET_ALIGNMENT) as u32; + (unsafe { 
gl.get_parameter_i32(glow::UNIFORM_BUFFER_OFFSET_ALIGNMENT) } as u32); let min_storage_buffer_offset_alignment = if ver >= (3, 1) { - gl.get_parameter_i32(glow::SHADER_STORAGE_BUFFER_OFFSET_ALIGNMENT) as u32 + (unsafe { gl.get_parameter_i32(glow::SHADER_STORAGE_BUFFER_OFFSET_ALIGNMENT) } as u32) } else { 256 }; let max_uniform_buffers_per_shader_stage = - gl.get_parameter_i32(glow::MAX_VERTEX_UNIFORM_BLOCKS) - .min(gl.get_parameter_i32(glow::MAX_FRAGMENT_UNIFORM_BLOCKS)) as u32; + unsafe { gl.get_parameter_i32(glow::MAX_VERTEX_UNIFORM_BLOCKS) } + .min(unsafe { gl.get_parameter_i32(glow::MAX_FRAGMENT_UNIFORM_BLOCKS) }) + as u32; let max_compute_workgroups_per_dimension = if supports_work_group_params { - gl.get_parameter_indexed_i32(glow::MAX_COMPUTE_WORK_GROUP_COUNT, 0) - .min(gl.get_parameter_indexed_i32(glow::MAX_COMPUTE_WORK_GROUP_COUNT, 1)) - .min(gl.get_parameter_indexed_i32(glow::MAX_COMPUTE_WORK_GROUP_COUNT, 2)) + unsafe { gl.get_parameter_indexed_i32(glow::MAX_COMPUTE_WORK_GROUP_COUNT, 0) } + .min(unsafe { gl.get_parameter_indexed_i32(glow::MAX_COMPUTE_WORK_GROUP_COUNT, 1) }) + .min(unsafe { gl.get_parameter_indexed_i32(glow::MAX_COMPUTE_WORK_GROUP_COUNT, 2) }) as u32 } else { 0 @@ -441,7 +450,9 @@ impl super::Adapter { max_texture_dimension_1d: max_texture_size, max_texture_dimension_2d: max_texture_size, max_texture_dimension_3d: max_texture_3d_size, - max_texture_array_layers: gl.get_parameter_i32(glow::MAX_ARRAY_TEXTURE_LAYERS) as u32, + max_texture_array_layers: unsafe { + gl.get_parameter_i32(glow::MAX_ARRAY_TEXTURE_LAYERS) + } as u32, max_bind_groups: crate::MAX_BIND_GROUPS as u32, max_bindings_per_bind_group: 65535, max_dynamic_uniform_buffers_per_pipeline_layout: max_uniform_buffers_per_shader_stage, @@ -451,56 +462,62 @@ impl super::Adapter { max_storage_buffers_per_shader_stage, max_storage_textures_per_shader_stage, max_uniform_buffers_per_shader_stage, - max_uniform_buffer_binding_size: gl.get_parameter_i32(glow::MAX_UNIFORM_BLOCK_SIZE) - as 
u32, + max_uniform_buffer_binding_size: unsafe { + gl.get_parameter_i32(glow::MAX_UNIFORM_BLOCK_SIZE) + } as u32, max_storage_buffer_binding_size: if ver >= (3, 1) { - gl.get_parameter_i32(glow::MAX_SHADER_STORAGE_BLOCK_SIZE) + unsafe { gl.get_parameter_i32(glow::MAX_SHADER_STORAGE_BLOCK_SIZE) } } else { 0 } as u32, max_vertex_buffers: if private_caps .contains(super::PrivateCapabilities::VERTEX_BUFFER_LAYOUT) { - gl.get_parameter_i32(glow::MAX_VERTEX_ATTRIB_BINDINGS) as u32 + (unsafe { gl.get_parameter_i32(glow::MAX_VERTEX_ATTRIB_BINDINGS) } as u32) } else { 16 // should this be different? }, - max_vertex_attributes: (gl.get_parameter_i32(glow::MAX_VERTEX_ATTRIBS) as u32) + max_vertex_attributes: (unsafe { gl.get_parameter_i32(glow::MAX_VERTEX_ATTRIBS) } + as u32) .min(super::MAX_VERTEX_ATTRIBUTES as u32), max_vertex_buffer_array_stride: if private_caps .contains(super::PrivateCapabilities::VERTEX_BUFFER_LAYOUT) { - gl.get_parameter_i32(glow::MAX_VERTEX_ATTRIB_STRIDE) as u32 + (unsafe { gl.get_parameter_i32(glow::MAX_VERTEX_ATTRIB_STRIDE) } as u32) } else { !0 }, max_push_constant_size: super::MAX_PUSH_CONSTANTS as u32 * 4, min_uniform_buffer_offset_alignment, min_storage_buffer_offset_alignment, - max_inter_stage_shader_components: gl.get_parameter_i32(glow::MAX_VARYING_COMPONENTS) - as u32, + max_inter_stage_shader_components: unsafe { + gl.get_parameter_i32(glow::MAX_VARYING_COMPONENTS) + } as u32, max_compute_workgroup_storage_size: if supports_work_group_params { - gl.get_parameter_i32(glow::MAX_COMPUTE_SHARED_MEMORY_SIZE) as u32 + (unsafe { gl.get_parameter_i32(glow::MAX_COMPUTE_SHARED_MEMORY_SIZE) } as u32) } else { 0 }, max_compute_invocations_per_workgroup: if supports_work_group_params { - gl.get_parameter_i32(glow::MAX_COMPUTE_WORK_GROUP_INVOCATIONS) as u32 + (unsafe { gl.get_parameter_i32(glow::MAX_COMPUTE_WORK_GROUP_INVOCATIONS) } as u32) } else { 0 }, max_compute_workgroup_size_x: if supports_work_group_params { - 
gl.get_parameter_indexed_i32(glow::MAX_COMPUTE_WORK_GROUP_SIZE, 0) as u32 + (unsafe { gl.get_parameter_indexed_i32(glow::MAX_COMPUTE_WORK_GROUP_SIZE, 0) } + as u32) } else { 0 }, max_compute_workgroup_size_y: if supports_work_group_params { - gl.get_parameter_indexed_i32(glow::MAX_COMPUTE_WORK_GROUP_SIZE, 1) as u32 + (unsafe { gl.get_parameter_indexed_i32(glow::MAX_COMPUTE_WORK_GROUP_SIZE, 1) } + as u32) } else { 0 }, max_compute_workgroup_size_z: if supports_work_group_params { - gl.get_parameter_indexed_i32(glow::MAX_COMPUTE_WORK_GROUP_SIZE, 2) as u32 + (unsafe { gl.get_parameter_indexed_i32(glow::MAX_COMPUTE_WORK_GROUP_SIZE, 2) } + as u32) } else { 0 }, @@ -570,27 +587,22 @@ impl super::Adapter { unsafe fn create_shader_clear_program( gl: &glow::Context, ) -> (glow::Program, glow::UniformLocation) { - let program = gl - .create_program() - .expect("Could not create shader program"); - let vertex = gl - .create_shader(glow::VERTEX_SHADER) - .expect("Could not create shader"); - gl.shader_source(vertex, include_str!("./shaders/clear.vert")); - gl.compile_shader(vertex); - let fragment = gl - .create_shader(glow::FRAGMENT_SHADER) - .expect("Could not create shader"); - gl.shader_source(fragment, include_str!("./shaders/clear.frag")); - gl.compile_shader(fragment); - gl.attach_shader(program, vertex); - gl.attach_shader(program, fragment); - gl.link_program(program); - let color_uniform_location = gl - .get_uniform_location(program, "color") + let program = unsafe { gl.create_program() }.expect("Could not create shader program"); + let vertex = + unsafe { gl.create_shader(glow::VERTEX_SHADER) }.expect("Could not create shader"); + unsafe { gl.shader_source(vertex, include_str!("./shaders/clear.vert")) }; + unsafe { gl.compile_shader(vertex) }; + let fragment = + unsafe { gl.create_shader(glow::FRAGMENT_SHADER) }.expect("Could not create shader"); + unsafe { gl.shader_source(fragment, include_str!("./shaders/clear.frag")) }; + unsafe { gl.compile_shader(fragment) }; 
+ unsafe { gl.attach_shader(program, vertex) }; + unsafe { gl.attach_shader(program, fragment) }; + unsafe { gl.link_program(program) }; + let color_uniform_location = unsafe { gl.get_uniform_location(program, "color") } .expect("Could not find color uniform in shader clear shader"); - gl.delete_shader(vertex); - gl.delete_shader(fragment); + unsafe { gl.delete_shader(vertex) }; + unsafe { gl.delete_shader(fragment) }; (program, color_uniform_location) } @@ -603,24 +615,22 @@ impl crate::Adapter for super::Adapter { _limits: &wgt::Limits, ) -> Result, crate::DeviceError> { let gl = &self.shared.context.lock(); - gl.pixel_store_i32(glow::UNPACK_ALIGNMENT, 1); - gl.pixel_store_i32(glow::PACK_ALIGNMENT, 1); - let main_vao = gl - .create_vertex_array() - .map_err(|_| crate::DeviceError::OutOfMemory)?; - gl.bind_vertex_array(Some(main_vao)); - - let zero_buffer = gl - .create_buffer() - .map_err(|_| crate::DeviceError::OutOfMemory)?; - gl.bind_buffer(glow::COPY_READ_BUFFER, Some(zero_buffer)); + unsafe { gl.pixel_store_i32(glow::UNPACK_ALIGNMENT, 1) }; + unsafe { gl.pixel_store_i32(glow::PACK_ALIGNMENT, 1) }; + let main_vao = + unsafe { gl.create_vertex_array() }.map_err(|_| crate::DeviceError::OutOfMemory)?; + unsafe { gl.bind_vertex_array(Some(main_vao)) }; + + let zero_buffer = + unsafe { gl.create_buffer() }.map_err(|_| crate::DeviceError::OutOfMemory)?; + unsafe { gl.bind_buffer(glow::COPY_READ_BUFFER, Some(zero_buffer)) }; let zeroes = vec![0u8; super::ZERO_BUFFER_SIZE]; - gl.buffer_data_u8_slice(glow::COPY_READ_BUFFER, &zeroes, glow::STATIC_DRAW); + unsafe { gl.buffer_data_u8_slice(glow::COPY_READ_BUFFER, &zeroes, glow::STATIC_DRAW) }; // Compile the shader program we use for doing manual clears to work around Mesa fastclear // bug. 
let (shader_clear_program, shader_clear_program_color_uniform_location) = - Self::create_shader_clear_program(gl); + unsafe { Self::create_shader_clear_program(gl) }; Ok(crate::OpenDevice { device: super::Device { @@ -632,11 +642,9 @@ impl crate::Adapter for super::Adapter { queue: super::Queue { shared: Arc::clone(&self.shared), features, - draw_fbo: gl - .create_framebuffer() + draw_fbo: unsafe { gl.create_framebuffer() } .map_err(|_| crate::DeviceError::OutOfMemory)?, - copy_fbo: gl - .create_framebuffer() + copy_fbo: unsafe { gl.create_framebuffer() } .map_err(|_| crate::DeviceError::OutOfMemory)?, shader_clear_program, shader_clear_program_color_uniform_location, @@ -655,6 +663,22 @@ impl crate::Adapter for super::Adapter { use crate::TextureFormatCapabilities as Tfc; use wgt::TextureFormat as Tf; + let sample_count = { + let max_samples = unsafe { + self.shared + .context + .lock() + .get_parameter_i32(glow::MAX_SAMPLES) + }; + if max_samples >= 8 { + Tfc::MULTISAMPLE_X2 | Tfc::MULTISAMPLE_X4 | Tfc::MULTISAMPLE_X8 + } else if max_samples >= 4 { + Tfc::MULTISAMPLE_X2 | Tfc::MULTISAMPLE_X4 + } else { + Tfc::MULTISAMPLE_X2 + } + }; + // Base types are pulled from the table in the OpenGLES 3.0 spec in section 3.8. 
// // The storage types are based on table 8.26, in section @@ -662,10 +686,10 @@ impl crate::Adapter for super::Adapter { let empty = Tfc::empty(); let base = Tfc::COPY_SRC | Tfc::COPY_DST; let unfilterable = base | Tfc::SAMPLED; - let depth = base | Tfc::SAMPLED | Tfc::MULTISAMPLE | Tfc::DEPTH_STENCIL_ATTACHMENT; + let depth = base | Tfc::SAMPLED | sample_count | Tfc::DEPTH_STENCIL_ATTACHMENT; let filterable = unfilterable | Tfc::SAMPLED_LINEAR; let renderable = - unfilterable | Tfc::COLOR_ATTACHMENT | Tfc::MULTISAMPLE | Tfc::MULTISAMPLE_RESOLVE; + unfilterable | Tfc::COLOR_ATTACHMENT | sample_count | Tfc::MULTISAMPLE_RESOLVE; let filterable_renderable = filterable | renderable | Tfc::COLOR_ATTACHMENT_BLEND; let storage = base | Tfc::STORAGE | Tfc::STORAGE_READ_WRITE; @@ -692,12 +716,18 @@ impl crate::Adapter for super::Adapter { let half_float_renderable = private_caps_fn( super::PrivateCapabilities::COLOR_BUFFER_HALF_FLOAT, - Tfc::COLOR_ATTACHMENT | Tfc::COLOR_ATTACHMENT_BLEND, + Tfc::COLOR_ATTACHMENT + | Tfc::COLOR_ATTACHMENT_BLEND + | sample_count + | Tfc::MULTISAMPLE_RESOLVE, ); let float_renderable = private_caps_fn( super::PrivateCapabilities::COLOR_BUFFER_FLOAT, - Tfc::COLOR_ATTACHMENT | Tfc::COLOR_ATTACHMENT_BLEND, + Tfc::COLOR_ATTACHMENT + | Tfc::COLOR_ATTACHMENT_BLEND + | sample_count + | Tfc::MULTISAMPLE_RESOLVE, ); match format { @@ -793,7 +823,7 @@ impl crate::Adapter for super::Adapter { wgt::TextureFormat::Bgra8Unorm, ]; if surface.supports_srgb() { - formats.extend(&[ + formats.extend([ wgt::TextureFormat::Rgba8UnormSrgb, #[cfg(not(target_arch = "wasm32"))] wgt::TextureFormat::Bgra8UnormSrgb, @@ -842,16 +872,16 @@ impl super::AdapterShared { .private_caps .contains(super::PrivateCapabilities::GET_BUFFER_SUB_DATA) { - gl.get_buffer_sub_data(target, offset, dst_data); + unsafe { gl.get_buffer_sub_data(target, offset, dst_data) }; } else { log::error!("Fake map"); let length = dst_data.len(); let buffer_mapping = - gl.map_buffer_range(target, 
offset, length as _, glow::MAP_READ_BIT); + unsafe { gl.map_buffer_range(target, offset, length as _, glow::MAP_READ_BIT) }; - std::ptr::copy_nonoverlapping(buffer_mapping, dst_data.as_mut_ptr(), length); + unsafe { std::ptr::copy_nonoverlapping(buffer_mapping, dst_data.as_mut_ptr(), length) }; - gl.unmap_buffer(target); + unsafe { gl.unmap_buffer(target) }; } } } diff --git a/wgpu-hal/src/gles/command.rs b/wgpu-hal/src/gles/command.rs index beaf600e6e..4a9ee22d07 100644 --- a/wgpu-hal/src/gles/command.rs +++ b/wgpu-hal/src/gles/command.rs @@ -20,6 +20,7 @@ pub(super) struct State { color_targets: ArrayVec, stencil: super::StencilState, depth_bias: wgt::DepthBiasState, + alpha_to_coverage_enabled: bool, samplers: [Option; super::MAX_SAMPLERS], texture_slots: [TextureSlotDesc; super::MAX_TEXTURE_SLOTS], render_size: wgt::Extent3d, @@ -795,6 +796,14 @@ impl crate::CommandEncoder for super::CommandEncoder { .commands .push(C::ConfigureDepthStencil(aspects)); + // set multisampling state + if pipeline.alpha_to_coverage_enabled != self.state.alpha_to_coverage_enabled { + self.state.alpha_to_coverage_enabled = pipeline.alpha_to_coverage_enabled; + self.cmd_buffer + .commands + .push(C::SetAlphaToCoverage(pipeline.alpha_to_coverage_enabled)); + } + // set blend states if self.state.color_targets[..] != pipeline.color_targets[..] 
{ if pipeline diff --git a/wgpu-hal/src/gles/device.rs b/wgpu-hal/src/gles/device.rs index 6756d1884c..fa4802f9d8 100644 --- a/wgpu-hal/src/gles/device.rs +++ b/wgpu-hal/src/gles/device.rs @@ -20,6 +20,7 @@ struct CompilationContext<'a> { layout: &'a super::PipelineLayout, sampler_map: &'a mut super::SamplerBindMap, name_binding_map: &'a mut NameBindingMap, + multiview: Option, } impl CompilationContext<'_> { @@ -167,21 +168,22 @@ impl super::Device { naga::ShaderStage::Compute => glow::COMPUTE_SHADER, }; - let raw = gl.create_shader(target).unwrap(); + let raw = unsafe { gl.create_shader(target) }.unwrap(); #[cfg(not(target_arch = "wasm32"))] if gl.supports_debug() { //TODO: remove all transmutes from `object_label` // https://github.com/grovesNL/glow/issues/186 - gl.object_label(glow::SHADER, mem::transmute(raw), label); + let name = unsafe { mem::transmute(raw) }; + unsafe { gl.object_label(glow::SHADER, name, label) }; } - gl.shader_source(raw, shader); - gl.compile_shader(raw); + unsafe { gl.shader_source(raw, shader) }; + unsafe { gl.compile_shader(raw) }; log::info!("\tCompiled shader {:?}", raw); - let compiled_ok = gl.get_shader_compile_status(raw); - let msg = gl.get_shader_info_log(raw); + let compiled_ok = unsafe { gl.get_shader_compile_status(raw) }; + let msg = unsafe { gl.get_shader_info_log(raw) }; if compiled_ok { if !msg.is_empty() { log::warn!("\tCompile: {}", msg); @@ -205,7 +207,7 @@ impl super::Device { let pipeline_options = glsl::PipelineOptions { shader_stage: naga_stage, entry_point: stage.entry_point.to_string(), - multiview: None, + multiview: context.multiview, }; let shader = &stage.module.naga; @@ -269,12 +271,14 @@ impl super::Device { shaders: I, layout: &super::PipelineLayout, #[cfg_attr(target_arch = "wasm32", allow(unused))] label: Option<&str>, + multiview: Option, ) -> Result { - let program = gl.create_program().unwrap(); + let program = unsafe { gl.create_program() }.unwrap(); #[cfg(not(target_arch = "wasm32"))] if 
let Some(label) = label { if gl.supports_debug() { - gl.object_label(glow::PROGRAM, mem::transmute(program), Some(label)); + let name = unsafe { mem::transmute(program) }; + unsafe { gl.object_label(glow::PROGRAM, name, Some(label)) }; } } @@ -289,6 +293,7 @@ impl super::Device { layout, sampler_map: &mut sampler_map, name_binding_map: &mut name_binding_map, + multiview, }; let shader = Self::create_shader(gl, naga_stage, stage, context)?; @@ -303,28 +308,30 @@ impl super::Device { }; let shader_src = format!("#version {} es \n void main(void) {{}}", version,); log::info!("Only vertex shader is present. Creating an empty fragment shader",); - let shader = Self::compile_shader( - gl, - &shader_src, - naga::ShaderStage::Fragment, - Some("(wgpu internal) dummy fragment shader"), - )?; + let shader = unsafe { + Self::compile_shader( + gl, + &shader_src, + naga::ShaderStage::Fragment, + Some("(wgpu internal) dummy fragment shader"), + ) + }?; shaders_to_delete.push(shader); } for &shader in shaders_to_delete.iter() { - gl.attach_shader(program, shader); + unsafe { gl.attach_shader(program, shader) }; } - gl.link_program(program); + unsafe { gl.link_program(program) }; for shader in shaders_to_delete { - gl.delete_shader(shader); + unsafe { gl.delete_shader(shader) }; } log::info!("\tLinked program {:?}", program); - let linked_ok = gl.get_program_link_status(program); - let msg = gl.get_program_info_log(program); + let linked_ok = unsafe { gl.get_program_link_status(program) }; + let msg = unsafe { gl.get_program_info_log(program) }; if !linked_ok { return Err(crate::PipelineError::Linkage(has_stages, msg)); } @@ -339,16 +346,17 @@ impl super::Device { { // This remapping is only needed if we aren't able to put the binding layout // in the shader. We can't remap storage buffers this way. 
- gl.use_program(Some(program)); + unsafe { gl.use_program(Some(program)) }; for (ref name, (register, slot)) in name_binding_map { log::trace!("Get binding {:?} from program {:?}", name, program); match register { super::BindingRegister::UniformBuffers => { - let index = gl.get_uniform_block_index(program, name).unwrap(); - gl.uniform_block_binding(program, index, slot as _); + let index = unsafe { gl.get_uniform_block_index(program, name) }.unwrap(); + unsafe { gl.uniform_block_binding(program, index, slot as _) }; } super::BindingRegister::StorageBuffers => { - let index = gl.get_shader_storage_block_index(program, name).unwrap(); + let index = + unsafe { gl.get_shader_storage_block_index(program, name) }.unwrap(); log::error!( "Unable to re-map shader storage block {} to {}", name, @@ -357,28 +365,26 @@ impl super::Device { return Err(crate::DeviceError::Lost.into()); } super::BindingRegister::Textures | super::BindingRegister::Images => { - gl.uniform_1_i32( - gl.get_uniform_location(program, name).as_ref(), - slot as _, - ); + let location = unsafe { gl.get_uniform_location(program, name) }; + unsafe { gl.uniform_1_i32(location.as_ref(), slot as _) }; } } } } let mut uniforms: [super::UniformDesc; super::MAX_PUSH_CONSTANTS] = Default::default(); - let count = gl.get_active_uniforms(program); + let count = unsafe { gl.get_active_uniforms(program) }; let mut offset = 0; for uniform in 0..count { let glow::ActiveUniform { utype, name, .. 
} = - gl.get_active_uniform(program, uniform).unwrap(); + unsafe { gl.get_active_uniform(program, uniform) }.unwrap(); if conv::is_sampler(utype) { continue; } - if let Some(location) = gl.get_uniform_location(program, &name) { + if let Some(location) = unsafe { gl.get_uniform_location(program, &name) } { if uniforms[offset / 4].location.is_some() { panic!("Offset already occupied") } @@ -407,10 +413,10 @@ impl super::Device { impl crate::Device for super::Device { unsafe fn exit(self, queue: super::Queue) { let gl = &self.shared.context.lock(); - gl.delete_vertex_array(self.main_vao); - gl.delete_framebuffer(queue.draw_fbo); - gl.delete_framebuffer(queue.copy_fbo); - gl.delete_buffer(queue.zero_buffer); + unsafe { gl.delete_vertex_array(self.main_vao) }; + unsafe { gl.delete_framebuffer(queue.draw_fbo) }; + unsafe { gl.delete_framebuffer(queue.copy_fbo) }; + unsafe { gl.delete_buffer(queue.zero_buffer) }; } unsafe fn create_buffer( @@ -465,8 +471,8 @@ impl crate::Device for super::Device { map_flags |= glow::MAP_WRITE_BIT; } - let raw = Some(gl.create_buffer().unwrap()); - gl.bind_buffer(target, raw); + let raw = Some(unsafe { gl.create_buffer() }.unwrap()); + unsafe { gl.bind_buffer(target, raw) }; let raw_size = desc .size .try_into() @@ -483,7 +489,7 @@ impl crate::Device for super::Device { map_flags |= glow::MAP_COHERENT_BIT; } } - gl.buffer_storage(target, raw_size, None, map_flags); + unsafe { gl.buffer_storage(target, raw_size, None, map_flags) }; } else { assert!(!is_coherent); let usage = if is_host_visible { @@ -495,10 +501,10 @@ impl crate::Device for super::Device { } else { glow::STATIC_DRAW }; - gl.buffer_data_size(target, raw_size, usage); + unsafe { gl.buffer_data_size(target, raw_size, usage) }; } - gl.bind_buffer(target, None); + unsafe { gl.bind_buffer(target, None) }; if !is_coherent && desc.usage.contains(crate::BufferUses::MAP_WRITE) { map_flags |= glow::MAP_FLUSH_EXPLICIT_BIT; @@ -508,7 +514,8 @@ impl crate::Device for super::Device { 
#[cfg(not(target_arch = "wasm32"))] if let Some(label) = desc.label { if gl.supports_debug() { - gl.object_label(glow::BUFFER, mem::transmute(raw), Some(label)); + let name = unsafe { mem::transmute(raw) }; + unsafe { gl.object_label(glow::BUFFER, name, Some(label)) }; } } @@ -529,7 +536,7 @@ impl crate::Device for super::Device { unsafe fn destroy_buffer(&self, buffer: super::Buffer) { if let Some(raw) = buffer.raw { let gl = &self.shared.context.lock(); - gl.delete_buffer(raw); + unsafe { gl.delete_buffer(raw) }; } } @@ -547,21 +554,23 @@ impl crate::Device for super::Device { } Some(raw) => { let gl = &self.shared.context.lock(); - gl.bind_buffer(buffer.target, Some(raw)); + unsafe { gl.bind_buffer(buffer.target, Some(raw)) }; let ptr = if let Some(ref map_read_allocation) = buffer.data { let mut guard = map_read_allocation.lock().unwrap(); let slice = guard.as_mut_slice(); - self.shared.get_buffer_sub_data(gl, buffer.target, 0, slice); + unsafe { self.shared.get_buffer_sub_data(gl, buffer.target, 0, slice) }; slice.as_mut_ptr() } else { - gl.map_buffer_range( - buffer.target, - range.start as i32, - (range.end - range.start) as i32, - buffer.map_flags, - ) + unsafe { + gl.map_buffer_range( + buffer.target, + range.start as i32, + (range.end - range.start) as i32, + buffer.map_flags, + ) + } }; - gl.bind_buffer(buffer.target, None); + unsafe { gl.bind_buffer(buffer.target, None) }; ptr } }; @@ -574,9 +583,9 @@ impl crate::Device for super::Device { if let Some(raw) = buffer.raw { if buffer.data.is_none() { let gl = &self.shared.context.lock(); - gl.bind_buffer(buffer.target, Some(raw)); - gl.unmap_buffer(buffer.target); - gl.bind_buffer(buffer.target, None); + unsafe { gl.bind_buffer(buffer.target, Some(raw)) }; + unsafe { gl.unmap_buffer(buffer.target) }; + unsafe { gl.bind_buffer(buffer.target, None) }; } } Ok(()) @@ -587,13 +596,15 @@ impl crate::Device for super::Device { { if let Some(raw) = buffer.raw { let gl = &self.shared.context.lock(); - 
gl.bind_buffer(buffer.target, Some(raw)); + unsafe { gl.bind_buffer(buffer.target, Some(raw)) }; for range in ranges { - gl.flush_mapped_buffer_range( - buffer.target, - range.start as i32, - (range.end - range.start) as i32, - ); + unsafe { + gl.flush_mapped_buffer_range( + buffer.target, + range.start as i32, + (range.end - range.start) as i32, + ) + }; } } } @@ -622,89 +633,105 @@ impl crate::Device for super::Device { && desc.dimension == wgt::TextureDimension::D2 && desc.size.depth_or_array_layers == 1 { - let raw = gl.create_renderbuffer().unwrap(); - gl.bind_renderbuffer(glow::RENDERBUFFER, Some(raw)); + let raw = unsafe { gl.create_renderbuffer().unwrap() }; + unsafe { gl.bind_renderbuffer(glow::RENDERBUFFER, Some(raw)) }; if desc.sample_count > 1 { - gl.renderbuffer_storage_multisample( - glow::RENDERBUFFER, - desc.sample_count as i32, - format_desc.internal, - desc.size.width as i32, - desc.size.height as i32, - ); + unsafe { + gl.renderbuffer_storage_multisample( + glow::RENDERBUFFER, + desc.sample_count as i32, + format_desc.internal, + desc.size.width as i32, + desc.size.height as i32, + ) + }; } else { - gl.renderbuffer_storage( - glow::RENDERBUFFER, - format_desc.internal, - desc.size.width as i32, - desc.size.height as i32, - ); + unsafe { + gl.renderbuffer_storage( + glow::RENDERBUFFER, + format_desc.internal, + desc.size.width as i32, + desc.size.height as i32, + ) + }; } #[cfg(not(target_arch = "wasm32"))] if let Some(label) = desc.label { if gl.supports_debug() { - gl.object_label(glow::RENDERBUFFER, mem::transmute(raw), Some(label)); + let name = unsafe { mem::transmute(raw) }; + unsafe { gl.object_label(glow::RENDERBUFFER, name, Some(label)) }; } } - gl.bind_renderbuffer(glow::RENDERBUFFER, None); + unsafe { gl.bind_renderbuffer(glow::RENDERBUFFER, None) }; (super::TextureInner::Renderbuffer { raw }, false) } else { - let raw = gl.create_texture().unwrap(); + let raw = unsafe { gl.create_texture().unwrap() }; let (target, is_3d, is_cubemap) = 
super::Texture::get_info_from_desc(&mut copy_size, desc); - gl.bind_texture(target, Some(raw)); + unsafe { gl.bind_texture(target, Some(raw)) }; //Note: this has to be done before defining the storage! match desc.format.describe().sample_type { wgt::TextureSampleType::Float { filterable: false } | wgt::TextureSampleType::Uint | wgt::TextureSampleType::Sint => { // reset default filtering mode - gl.tex_parameter_i32(target, glow::TEXTURE_MIN_FILTER, glow::NEAREST as i32); - gl.tex_parameter_i32(target, glow::TEXTURE_MAG_FILTER, glow::NEAREST as i32); + unsafe { + gl.tex_parameter_i32(target, glow::TEXTURE_MIN_FILTER, glow::NEAREST as i32) + }; + unsafe { + gl.tex_parameter_i32(target, glow::TEXTURE_MAG_FILTER, glow::NEAREST as i32) + }; } wgt::TextureSampleType::Float { filterable: true } | wgt::TextureSampleType::Depth => {} } if is_3d { - gl.tex_storage_3d( - target, - desc.mip_level_count as i32, - format_desc.internal, - desc.size.width as i32, - desc.size.height as i32, - desc.size.depth_or_array_layers as i32, - ); + unsafe { + gl.tex_storage_3d( + target, + desc.mip_level_count as i32, + format_desc.internal, + desc.size.width as i32, + desc.size.height as i32, + desc.size.depth_or_array_layers as i32, + ) + }; } else if desc.sample_count > 1 { - gl.tex_storage_2d_multisample( - target, - desc.sample_count as i32, - format_desc.internal, - desc.size.width as i32, - desc.size.height as i32, - true, - ); + unsafe { + gl.tex_storage_2d_multisample( + target, + desc.sample_count as i32, + format_desc.internal, + desc.size.width as i32, + desc.size.height as i32, + true, + ) + }; } else { - gl.tex_storage_2d( - target, - desc.mip_level_count as i32, - format_desc.internal, - desc.size.width as i32, - desc.size.height as i32, - ); + unsafe { + gl.tex_storage_2d( + target, + desc.mip_level_count as i32, + format_desc.internal, + desc.size.width as i32, + desc.size.height as i32, + ) + }; } #[cfg(not(target_arch = "wasm32"))] if let Some(label) = desc.label { if 
gl.supports_debug() { - gl.object_label(glow::TEXTURE, mem::transmute(raw), Some(label)); + let name = unsafe { mem::transmute(raw) }; + unsafe { gl.object_label(glow::TEXTURE, name, Some(label)) }; } } - gl.bind_texture(target, None); + unsafe { gl.bind_texture(target, None) }; (super::TextureInner::Texture { raw, target }, is_cubemap) }; @@ -728,11 +755,11 @@ impl crate::Device for super::Device { let gl = &self.shared.context.lock(); match texture.inner { super::TextureInner::Renderbuffer { raw, .. } => { - gl.delete_renderbuffer(raw); + unsafe { gl.delete_renderbuffer(raw) }; } super::TextureInner::DefaultRenderbuffer => {} super::TextureInner::Texture { raw, .. } => { - gl.delete_texture(raw); + unsafe { gl.delete_texture(raw) }; } } } @@ -774,29 +801,35 @@ impl crate::Device for super::Device { ) -> Result { let gl = &self.shared.context.lock(); - let raw = gl.create_sampler().unwrap(); + let raw = unsafe { gl.create_sampler().unwrap() }; let (min, mag) = conv::map_filter_modes(desc.min_filter, desc.mag_filter, desc.mipmap_filter); - gl.sampler_parameter_i32(raw, glow::TEXTURE_MIN_FILTER, min as i32); - gl.sampler_parameter_i32(raw, glow::TEXTURE_MAG_FILTER, mag as i32); + unsafe { gl.sampler_parameter_i32(raw, glow::TEXTURE_MIN_FILTER, min as i32) }; + unsafe { gl.sampler_parameter_i32(raw, glow::TEXTURE_MAG_FILTER, mag as i32) }; - gl.sampler_parameter_i32( - raw, - glow::TEXTURE_WRAP_S, - conv::map_address_mode(desc.address_modes[0]) as i32, - ); - gl.sampler_parameter_i32( - raw, - glow::TEXTURE_WRAP_T, - conv::map_address_mode(desc.address_modes[1]) as i32, - ); - gl.sampler_parameter_i32( - raw, - glow::TEXTURE_WRAP_R, - conv::map_address_mode(desc.address_modes[2]) as i32, - ); + unsafe { + gl.sampler_parameter_i32( + raw, + glow::TEXTURE_WRAP_S, + conv::map_address_mode(desc.address_modes[0]) as i32, + ) + }; + unsafe { + gl.sampler_parameter_i32( + raw, + glow::TEXTURE_WRAP_T, + conv::map_address_mode(desc.address_modes[1]) as i32, + ) + }; + unsafe 
{ + gl.sampler_parameter_i32( + raw, + glow::TEXTURE_WRAP_R, + conv::map_address_mode(desc.address_modes[2]) as i32, + ) + }; if let Some(border_color) = desc.border_color { let border = match border_color { @@ -806,37 +839,44 @@ impl crate::Device for super::Device { wgt::SamplerBorderColor::OpaqueBlack => [0.0, 0.0, 0.0, 1.0], wgt::SamplerBorderColor::OpaqueWhite => [1.0; 4], }; - gl.sampler_parameter_f32_slice(raw, glow::TEXTURE_BORDER_COLOR, &border); + unsafe { gl.sampler_parameter_f32_slice(raw, glow::TEXTURE_BORDER_COLOR, &border) }; } if let Some(ref range) = desc.lod_clamp { - gl.sampler_parameter_f32(raw, glow::TEXTURE_MIN_LOD, range.start); - gl.sampler_parameter_f32(raw, glow::TEXTURE_MAX_LOD, range.end); + unsafe { gl.sampler_parameter_f32(raw, glow::TEXTURE_MIN_LOD, range.start) }; + unsafe { gl.sampler_parameter_f32(raw, glow::TEXTURE_MAX_LOD, range.end) }; } if let Some(anisotropy) = desc.anisotropy_clamp { - gl.sampler_parameter_i32(raw, glow::TEXTURE_MAX_ANISOTROPY, anisotropy.get() as i32); + unsafe { + gl.sampler_parameter_i32(raw, glow::TEXTURE_MAX_ANISOTROPY, anisotropy.get() as i32) + }; } //set_param_float(glow::TEXTURE_LOD_BIAS, info.lod_bias.0); if let Some(compare) = desc.compare { - gl.sampler_parameter_i32( - raw, - glow::TEXTURE_COMPARE_MODE, - glow::COMPARE_REF_TO_TEXTURE as i32, - ); - gl.sampler_parameter_i32( - raw, - glow::TEXTURE_COMPARE_FUNC, - conv::map_compare_func(compare) as i32, - ); + unsafe { + gl.sampler_parameter_i32( + raw, + glow::TEXTURE_COMPARE_MODE, + glow::COMPARE_REF_TO_TEXTURE as i32, + ) + }; + unsafe { + gl.sampler_parameter_i32( + raw, + glow::TEXTURE_COMPARE_FUNC, + conv::map_compare_func(compare) as i32, + ) + }; } #[cfg(not(target_arch = "wasm32"))] if let Some(label) = desc.label { if gl.supports_debug() { - gl.object_label(glow::SAMPLER, mem::transmute(raw), Some(label)); + let name = unsafe { mem::transmute(raw) }; + unsafe { gl.object_label(glow::SAMPLER, name, Some(label)) }; } } @@ -844,7 +884,7 @@ 
impl crate::Device for super::Device { } unsafe fn destroy_sampler(&self, sampler: super::Sampler) { let gl = &self.shared.context.lock(); - gl.delete_sampler(sampler.raw); + unsafe { gl.delete_sampler(sampler.raw) }; } unsafe fn create_command_encoder( @@ -1032,7 +1072,8 @@ impl crate::Device for super::Device { .as_ref() .map(|fs| (naga::ShaderStage::Fragment, fs)), ); - let inner = self.create_pipeline(gl, shaders, desc.layout, desc.label)?; + let inner = + unsafe { self.create_pipeline(gl, shaders, desc.layout, desc.label, desc.multiview) }?; let (vertex_buffers, vertex_attributes) = { let mut buffers = Vec::new(); @@ -1087,11 +1128,12 @@ impl crate::Device for super::Device { .depth_stencil .as_ref() .map(|ds| conv::map_stencil(&ds.stencil)), + alpha_to_coverage_enabled: desc.multisample.alpha_to_coverage_enabled, }) } unsafe fn destroy_render_pipeline(&self, pipeline: super::RenderPipeline) { let gl = &self.shared.context.lock(); - gl.delete_program(pipeline.inner.program); + unsafe { gl.delete_program(pipeline.inner.program) }; } unsafe fn create_compute_pipeline( @@ -1100,13 +1142,13 @@ impl crate::Device for super::Device { ) -> Result { let gl = &self.shared.context.lock(); let shaders = iter::once((naga::ShaderStage::Compute, &desc.stage)); - let inner = self.create_pipeline(gl, shaders, desc.layout, desc.label)?; + let inner = unsafe { self.create_pipeline(gl, shaders, desc.layout, desc.label, None) }?; Ok(super::ComputePipeline { inner }) } unsafe fn destroy_compute_pipeline(&self, pipeline: super::ComputePipeline) { let gl = &self.shared.context.lock(); - gl.delete_program(pipeline.inner.program); + unsafe { gl.delete_program(pipeline.inner.program) }; } #[cfg_attr(target_arch = "wasm32", allow(unused))] @@ -1119,9 +1161,8 @@ impl crate::Device for super::Device { let mut queries = Vec::with_capacity(desc.count as usize); for i in 0..desc.count { - let query = gl - .create_query() - .map_err(|_| crate::DeviceError::OutOfMemory)?; + let query = + 
unsafe { gl.create_query() }.map_err(|_| crate::DeviceError::OutOfMemory)?; #[cfg(not(target_arch = "wasm32"))] if gl.supports_debug() { use std::fmt::Write; @@ -1129,7 +1170,8 @@ impl crate::Device for super::Device { if let Some(label) = desc.label { temp_string.clear(); let _ = write!(temp_string, "{}[{}]", label, i); - gl.object_label(glow::QUERY, mem::transmute(query), Some(&temp_string)); + let name = unsafe { mem::transmute(query) }; + unsafe { gl.object_label(glow::QUERY, name, Some(&temp_string)) }; } } queries.push(query); @@ -1146,7 +1188,7 @@ impl crate::Device for super::Device { unsafe fn destroy_query_set(&self, set: super::QuerySet) { let gl = &self.shared.context.lock(); for &query in set.queries.iter() { - gl.delete_query(query); + unsafe { gl.delete_query(query) }; } } unsafe fn create_fence(&self) -> Result { @@ -1158,7 +1200,7 @@ impl crate::Device for super::Device { unsafe fn destroy_fence(&self, fence: super::Fence) { let gl = &self.shared.context.lock(); for (_, sync) in fence.pending { - gl.delete_sync(sync); + unsafe { gl.delete_sync(sync) }; } } unsafe fn get_fence_value( @@ -1186,7 +1228,9 @@ impl crate::Device for super::Device { .iter() .find(|&&(value, _)| value >= wait_value) .unwrap(); - match gl.client_wait_sync(sync, glow::SYNC_FLUSH_COMMANDS_BIT, timeout_ns as i32) { + match unsafe { + gl.client_wait_sync(sync, glow::SYNC_FLUSH_COMMANDS_BIT, timeout_ns as i32) + } { // for some reason firefox returns WAIT_FAILED, to investigate #[cfg(target_arch = "wasm32")] glow::WAIT_FAILED => { @@ -1204,16 +1248,19 @@ impl crate::Device for super::Device { unsafe fn start_capture(&self) -> bool { #[cfg(feature = "renderdoc")] - return self - .render_doc - .start_frame_capture(self.shared.context.raw_context(), ptr::null_mut()); + return unsafe { + self.render_doc + .start_frame_capture(self.shared.context.raw_context(), ptr::null_mut()) + }; #[allow(unreachable_code)] false } unsafe fn stop_capture(&self) { #[cfg(feature = "renderdoc")] - 
self.render_doc - .end_frame_capture(ptr::null_mut(), ptr::null_mut()) + unsafe { + self.render_doc + .end_frame_capture(ptr::null_mut(), ptr::null_mut()) + } } } diff --git a/wgpu-hal/src/gles/egl.rs b/wgpu-hal/src/gles/egl.rs index 1f4cae00a1..b66047c1ec 100644 --- a/wgpu-hal/src/gles/egl.rs +++ b/wgpu-hal/src/gles/egl.rs @@ -94,11 +94,11 @@ unsafe extern "system" fn egl_debug_proc( EGL_DEBUG_MSG_INFO_KHR => log::Level::Info, _ => log::Level::Debug, }; - let command = ffi::CStr::from_ptr(command_raw).to_string_lossy(); + let command = unsafe { ffi::CStr::from_ptr(command_raw) }.to_string_lossy(); let message = if message_raw.is_null() { "".into() } else { - ffi::CStr::from_ptr(message_raw).to_string_lossy() + unsafe { ffi::CStr::from_ptr(message_raw) }.to_string_lossy() }; log::log!( @@ -122,7 +122,7 @@ fn open_x_display() -> Option<(ptr::NonNull, libloading::Library)> unsafe fn find_library(paths: &[&str]) -> Option { for path in paths { - match libloading::Library::new(path) { + match unsafe { libloading::Library::new(path) } { Ok(lib) => return Some(lib), _ => continue, }; @@ -629,11 +629,15 @@ impl crate::Instance for Instance { #[cfg(not(feature = "emscripten"))] let egl_result = if cfg!(windows) { - egl::DynamicInstance::::load_required_from_filename("libEGL.dll") + unsafe { + egl::DynamicInstance::::load_required_from_filename("libEGL.dll") + } } else if cfg!(any(target_os = "macos", target_os = "ios")) { - egl::DynamicInstance::::load_required_from_filename("libEGL.dylib") + unsafe { + egl::DynamicInstance::::load_required_from_filename("libEGL.dylib") + } } else { - egl::DynamicInstance::::load_required() + unsafe { egl::DynamicInstance::::load_required() } }; let egl = match egl_result { Ok(egl) => Arc::new(egl), @@ -654,17 +658,17 @@ impl crate::Instance for Instance { client_ext_str.split_whitespace().collect::>() ); - let wayland_library = if client_ext_str.contains(&"EGL_EXT_platform_wayland") { + let wayland_library = if 
client_ext_str.contains("EGL_EXT_platform_wayland") { test_wayland_display() } else { None }; - let x11_display_library = if client_ext_str.contains(&"EGL_EXT_platform_x11") { + let x11_display_library = if client_ext_str.contains("EGL_EXT_platform_x11") { open_x_display() } else { None }; - let angle_x11_display_library = if client_ext_str.contains(&"EGL_ANGLE_platform_angle") { + let angle_x11_display_library = if client_ext_str.contains("EGL_ANGLE_platform_angle") { open_x_display() } else { None @@ -702,11 +706,7 @@ impl crate::Instance for Instance { EGL_PLATFORM_ANGLE_NATIVE_PLATFORM_TYPE_ANGLE as egl::Attrib, EGL_PLATFORM_X11_KHR as egl::Attrib, EGL_PLATFORM_ANGLE_DEBUG_LAYERS_ENABLED as egl::Attrib, - if desc.flags.contains(crate::InstanceFlags::VALIDATION) { - 1 - } else { - 0 - }, + usize::from(desc.flags.contains(crate::InstanceFlags::VALIDATION)), egl::ATTRIB_NONE, ]; let display = egl @@ -735,11 +735,13 @@ impl crate::Instance for Instance { }; if desc.flags.contains(crate::InstanceFlags::VALIDATION) - && client_ext_str.contains(&"EGL_KHR_debug") + && client_ext_str.contains("EGL_KHR_debug") { log::info!("Enabling EGL debug output"); - let function: EglDebugMessageControlFun = - std::mem::transmute(egl.get_proc_address("eglDebugMessageControlKHR").unwrap()); + let function: EglDebugMessageControlFun = { + let addr = egl.get_proc_address("eglDebugMessageControlKHR").unwrap(); + unsafe { std::mem::transmute(addr) } + }; let attributes = [ EGL_DEBUG_MSG_CRITICAL_KHR as egl::Attrib, 1, @@ -751,7 +753,7 @@ impl crate::Instance for Instance { 1, egl::ATTRIB_NONE, ]; - (function)(Some(egl_debug_proc), attributes.as_ptr()); + unsafe { (function)(Some(egl_debug_proc), attributes.as_ptr()) }; } let inner = Inner::create(desc.flags, egl, display)?; @@ -790,7 +792,9 @@ impl crate::Instance for Instance { .get_config_attrib(inner.egl.display, inner.config, egl::NATIVE_VISUAL_ID) .unwrap(); - let ret = ANativeWindow_setBuffersGeometry(handle.a_native_window, 0, 0, 
format); + let ret = unsafe { + ANativeWindow_setBuffersGeometry(handle.a_native_window, 0, 0, format) + }; if ret != 0 { log::error!("Error returned from ANativeWindow_setBuffersGeometry"); @@ -862,33 +866,36 @@ impl crate::Instance for Instance { let inner = self.inner.lock(); inner.egl.make_current(); - let gl = glow::Context::from_loader_function(|name| { - inner - .egl - .instance - .get_proc_address(name) - .map_or(ptr::null(), |p| p as *const _) - }); + let gl = unsafe { + glow::Context::from_loader_function(|name| { + inner + .egl + .instance + .get_proc_address(name) + .map_or(ptr::null(), |p| p as *const _) + }) + }; if self.flags.contains(crate::InstanceFlags::DEBUG) && gl.supports_debug() { - log::info!( - "Max label length: {}", + log::info!("Max label length: {}", unsafe { gl.get_parameter_i32(glow::MAX_LABEL_LENGTH) - ); + }); } if self.flags.contains(crate::InstanceFlags::VALIDATION) && gl.supports_debug() { log::info!("Enabling GLES debug output"); - gl.enable(glow::DEBUG_OUTPUT); - gl.debug_message_callback(gl_debug_message_callback); + unsafe { gl.enable(glow::DEBUG_OUTPUT) }; + unsafe { gl.debug_message_callback(gl_debug_message_callback) }; } inner.egl.unmake_current(); - super::Adapter::expose(AdapterContext { - glow: Mutex::new(gl), - egl: Some(inner.egl.clone()), - }) + unsafe { + super::Adapter::expose(AdapterContext { + glow: Mutex::new(gl), + egl: Some(inner.egl.clone()), + }) + } .into_iter() .collect() } @@ -905,10 +912,13 @@ impl super::Adapter { pub unsafe fn new_external( fun: impl FnMut(&str) -> *const ffi::c_void, ) -> Option> { - Self::expose(AdapterContext { - glow: Mutex::new(glow::Context::from_loader_function(fun)), - egl: None, - }) + let context = unsafe { glow::Context::from_loader_function(fun) }; + unsafe { + Self::expose(AdapterContext { + glow: Mutex::new(context), + egl: None, + }) + } } pub fn adapter_context(&self) -> &AdapterContext { @@ -972,27 +982,29 @@ impl Surface { crate::SurfaceError::Lost })?; - 
gl.disable(glow::SCISSOR_TEST); - gl.color_mask(true, true, true, true); + unsafe { gl.disable(glow::SCISSOR_TEST) }; + unsafe { gl.color_mask(true, true, true, true) }; - gl.bind_framebuffer(glow::DRAW_FRAMEBUFFER, None); - gl.bind_framebuffer(glow::READ_FRAMEBUFFER, Some(sc.framebuffer)); + unsafe { gl.bind_framebuffer(glow::DRAW_FRAMEBUFFER, None) }; + unsafe { gl.bind_framebuffer(glow::READ_FRAMEBUFFER, Some(sc.framebuffer)) }; // Note the Y-flipping here. GL's presentation is not flipped, // but main rendering is. Therefore, we Y-flip the output positions // in the shader, and also this blit. - gl.blit_framebuffer( - 0, - sc.extent.height as i32, - sc.extent.width as i32, - 0, - 0, - 0, - sc.extent.width as i32, - sc.extent.height as i32, - glow::COLOR_BUFFER_BIT, - glow::NEAREST, - ); - gl.bind_framebuffer(glow::READ_FRAMEBUFFER, None); + unsafe { + gl.blit_framebuffer( + 0, + sc.extent.height as i32, + sc.extent.width as i32, + 0, + 0, + 0, + sc.extent.width as i32, + sc.extent.height as i32, + glow::COLOR_BUFFER_BIT, + glow::NEAREST, + ) + }; + unsafe { gl.bind_framebuffer(glow::READ_FRAMEBUFFER, None) }; self.egl .instance @@ -1019,8 +1031,8 @@ impl Surface { let gl = &device.shared.context.lock(); match self.swapchain.take() { Some(sc) => { - gl.delete_renderbuffer(sc.renderbuffer); - gl.delete_framebuffer(sc.framebuffer); + unsafe { gl.delete_renderbuffer(sc.renderbuffer) }; + unsafe { gl.delete_framebuffer(sc.framebuffer) }; Some((sc.surface, sc.wl_window)) } None => None, @@ -1043,7 +1055,7 @@ impl crate::Surface for Surface { ) -> Result<(), crate::SurfaceError> { use raw_window_handle::RawWindowHandle as Rwh; - let (surface, wl_window) = match self.unconfigure_impl(device) { + let (surface, wl_window) = match unsafe { self.unconfigure_impl(device) } { Some(pair) => pair, None => { let mut wl_window = None; @@ -1068,9 +1080,9 @@ impl crate::Surface for Surface { (WindowKind::Wayland, Rwh::Wayland(handle)) => { let library = 
self.wsi.library.as_ref().unwrap(); let wl_egl_window_create: libloading::Symbol = - library.get(b"wl_egl_window_create").unwrap(); - let window = wl_egl_window_create(handle.surface, 640, 480) as *mut _ - as *mut std::ffi::c_void; + unsafe { library.get(b"wl_egl_window_create") }.unwrap(); + let window = unsafe { wl_egl_window_create(handle.surface, 640, 480) } + as *mut _ as *mut std::ffi::c_void; wl_window = Some(window); window } @@ -1147,12 +1159,14 @@ impl crate::Surface for Surface { &attributes_usize, ) } - _ => self.egl.instance.create_window_surface( - self.egl.display, - self.config, - native_window_ptr, - Some(&attributes), - ), + _ => unsafe { + self.egl.instance.create_window_surface( + self.egl.display, + self.config, + native_window_ptr, + Some(&attributes), + ) + }, }; match raw_result { @@ -1168,36 +1182,42 @@ impl crate::Surface for Surface { if let Some(window) = wl_window { let library = self.wsi.library.as_ref().unwrap(); let wl_egl_window_resize: libloading::Symbol = - library.get(b"wl_egl_window_resize").unwrap(); - wl_egl_window_resize( - window, - config.extent.width as i32, - config.extent.height as i32, - 0, - 0, - ); + unsafe { library.get(b"wl_egl_window_resize") }.unwrap(); + unsafe { + wl_egl_window_resize( + window, + config.extent.width as i32, + config.extent.height as i32, + 0, + 0, + ) + }; } let format_desc = device.shared.describe_texture_format(config.format); let gl = &device.shared.context.lock(); - let renderbuffer = gl.create_renderbuffer().unwrap(); - gl.bind_renderbuffer(glow::RENDERBUFFER, Some(renderbuffer)); - gl.renderbuffer_storage( - glow::RENDERBUFFER, - format_desc.internal, - config.extent.width as _, - config.extent.height as _, - ); - let framebuffer = gl.create_framebuffer().unwrap(); - gl.bind_framebuffer(glow::READ_FRAMEBUFFER, Some(framebuffer)); - gl.framebuffer_renderbuffer( - glow::READ_FRAMEBUFFER, - glow::COLOR_ATTACHMENT0, - glow::RENDERBUFFER, - Some(renderbuffer), - ); - 
gl.bind_renderbuffer(glow::RENDERBUFFER, None); - gl.bind_framebuffer(glow::READ_FRAMEBUFFER, None); + let renderbuffer = unsafe { gl.create_renderbuffer() }.unwrap(); + unsafe { gl.bind_renderbuffer(glow::RENDERBUFFER, Some(renderbuffer)) }; + unsafe { + gl.renderbuffer_storage( + glow::RENDERBUFFER, + format_desc.internal, + config.extent.width as _, + config.extent.height as _, + ) + }; + let framebuffer = unsafe { gl.create_framebuffer() }.unwrap(); + unsafe { gl.bind_framebuffer(glow::READ_FRAMEBUFFER, Some(framebuffer)) }; + unsafe { + gl.framebuffer_renderbuffer( + glow::READ_FRAMEBUFFER, + glow::COLOR_ATTACHMENT0, + glow::RENDERBUFFER, + Some(renderbuffer), + ) + }; + unsafe { gl.bind_renderbuffer(glow::RENDERBUFFER, None) }; + unsafe { gl.bind_framebuffer(glow::READ_FRAMEBUFFER, None) }; self.swapchain = Some(Swapchain { surface, @@ -1214,20 +1234,16 @@ impl crate::Surface for Surface { } unsafe fn unconfigure(&mut self, device: &super::Device) { - if let Some((surface, wl_window)) = self.unconfigure_impl(device) { + if let Some((surface, wl_window)) = unsafe { self.unconfigure_impl(device) } { self.egl .instance .destroy_surface(self.egl.display, surface) .unwrap(); if let Some(window) = wl_window { - let wl_egl_window_destroy: libloading::Symbol = self - .wsi - .library - .as_ref() - .expect("unsupported window") - .get(b"wl_egl_window_destroy") - .unwrap(); - wl_egl_window_destroy(window); + let library = self.wsi.library.as_ref().expect("unsupported window"); + let wl_egl_window_destroy: libloading::Symbol = + unsafe { library.get(b"wl_egl_window_destroy") }.unwrap(); + unsafe { wl_egl_window_destroy(window) }; } } } diff --git a/wgpu-hal/src/gles/mod.rs b/wgpu-hal/src/gles/mod.rs index 929f369833..bad5c08b91 100644 --- a/wgpu-hal/src/gles/mod.rs +++ b/wgpu-hal/src/gles/mod.rs @@ -502,6 +502,7 @@ pub struct RenderPipeline { depth: Option, depth_bias: wgt::DepthBiasState, stencil: Option, + alpha_to_coverage_enabled: bool, } // SAFE: WASM doesn't have 
threads @@ -742,6 +743,7 @@ enum Command { SetDepth(DepthState), SetDepthBias(wgt::DepthBiasState), ConfigureDepthStencil(crate::FormatAspects), + SetAlphaToCoverage(bool), SetVertexAttribute { buffer: Option, buffer_desc: VertexBufferDesc, diff --git a/wgpu-hal/src/gles/queue.rs b/wgpu-hal/src/gles/queue.rs index 6cdccd9141..75770c501c 100644 --- a/wgpu-hal/src/gles/queue.rs +++ b/wgpu-hal/src/gles/queue.rs @@ -30,49 +30,51 @@ fn is_layered_target(target: super::BindTarget) -> bool { impl super::Queue { /// Performs a manual shader clear, used as a workaround for a clearing bug on mesa unsafe fn perform_shader_clear(&self, gl: &glow::Context, draw_buffer: u32, color: [f32; 4]) { - gl.use_program(Some(self.shader_clear_program)); - gl.uniform_4_f32( - Some(&self.shader_clear_program_color_uniform_location), - color[0], - color[1], - color[2], - color[3], - ); - gl.disable(glow::DEPTH_TEST); - gl.disable(glow::STENCIL_TEST); - gl.disable(glow::SCISSOR_TEST); - gl.disable(glow::BLEND); - gl.disable(glow::CULL_FACE); - gl.draw_buffers(&[glow::COLOR_ATTACHMENT0 + draw_buffer]); - gl.draw_arrays(glow::TRIANGLES, 0, 3); + unsafe { gl.use_program(Some(self.shader_clear_program)) }; + unsafe { + gl.uniform_4_f32( + Some(&self.shader_clear_program_color_uniform_location), + color[0], + color[1], + color[2], + color[3], + ) + }; + unsafe { gl.disable(glow::DEPTH_TEST) }; + unsafe { gl.disable(glow::STENCIL_TEST) }; + unsafe { gl.disable(glow::SCISSOR_TEST) }; + unsafe { gl.disable(glow::BLEND) }; + unsafe { gl.disable(glow::CULL_FACE) }; + unsafe { gl.draw_buffers(&[glow::COLOR_ATTACHMENT0 + draw_buffer]) }; + unsafe { gl.draw_arrays(glow::TRIANGLES, 0, 3) }; if self.draw_buffer_count != 0 { // Reset the draw buffers to what they were before the clear let indices = (0..self.draw_buffer_count as u32) .map(|i| glow::COLOR_ATTACHMENT0 + i) .collect::>(); - gl.draw_buffers(&indices); + unsafe { gl.draw_buffers(&indices) }; } #[cfg(not(target_arch = "wasm32"))] for draw_buffer in 
0..self.draw_buffer_count as u32 { - gl.disable_draw_buffer(glow::BLEND, draw_buffer); + unsafe { gl.disable_draw_buffer(glow::BLEND, draw_buffer) }; } } unsafe fn reset_state(&mut self, gl: &glow::Context) { - gl.use_program(None); - gl.bind_framebuffer(glow::FRAMEBUFFER, None); - gl.disable(glow::DEPTH_TEST); - gl.disable(glow::STENCIL_TEST); - gl.disable(glow::SCISSOR_TEST); - gl.disable(glow::BLEND); - gl.disable(glow::CULL_FACE); - gl.disable(glow::POLYGON_OFFSET_FILL); + unsafe { gl.use_program(None) }; + unsafe { gl.bind_framebuffer(glow::FRAMEBUFFER, None) }; + unsafe { gl.disable(glow::DEPTH_TEST) }; + unsafe { gl.disable(glow::STENCIL_TEST) }; + unsafe { gl.disable(glow::SCISSOR_TEST) }; + unsafe { gl.disable(glow::BLEND) }; + unsafe { gl.disable(glow::CULL_FACE) }; + unsafe { gl.disable(glow::POLYGON_OFFSET_FILL) }; if self.features.contains(wgt::Features::DEPTH_CLIP_CONTROL) { - gl.disable(glow::DEPTH_CLAMP); + unsafe { gl.disable(glow::DEPTH_CLAMP) }; } - gl.bind_buffer(glow::ELEMENT_ARRAY_BUFFER, None); + unsafe { gl.bind_buffer(glow::ELEMENT_ARRAY_BUFFER, None) }; self.current_index_buffer = None; } @@ -85,34 +87,60 @@ impl super::Queue { ) { match view.inner { super::TextureInner::Renderbuffer { raw } => { - gl.framebuffer_renderbuffer(fbo_target, attachment, glow::RENDERBUFFER, Some(raw)); - } - super::TextureInner::DefaultRenderbuffer => panic!("Unexpected default RBO"), - super::TextureInner::Texture { raw, target } => { - if is_layered_target(target) { - gl.framebuffer_texture_layer( + unsafe { + gl.framebuffer_renderbuffer( fbo_target, attachment, + glow::RENDERBUFFER, Some(raw), - view.mip_levels.start as i32, - view.array_layers.start as i32, - ); + ) + }; + } + super::TextureInner::DefaultRenderbuffer => panic!("Unexpected default RBO"), + super::TextureInner::Texture { raw, target } => { + let num_layers = view.array_layers.end - view.array_layers.start; + if num_layers > 1 { + #[cfg(all(target_arch = "wasm32", target_os = "unknown"))] + 
unsafe { + gl.framebuffer_texture_multiview_ovr( + fbo_target, + attachment, + Some(raw), + view.mip_levels.start as i32, + view.array_layers.start as i32, + num_layers as i32, + ) + }; + } else if is_layered_target(target) { + unsafe { + gl.framebuffer_texture_layer( + fbo_target, + attachment, + Some(raw), + view.mip_levels.start as i32, + view.array_layers.start as i32, + ) + }; } else if target == glow::TEXTURE_CUBE_MAP { - gl.framebuffer_texture_2d( - fbo_target, - attachment, - CUBEMAP_FACES[view.array_layers.start as usize], - Some(raw), - view.mip_levels.start as i32, - ); + unsafe { + gl.framebuffer_texture_2d( + fbo_target, + attachment, + CUBEMAP_FACES[view.array_layers.start as usize], + Some(raw), + view.mip_levels.start as i32, + ) + }; } else { - gl.framebuffer_texture_2d( - fbo_target, - attachment, - target, - Some(raw), - view.mip_levels.start as i32, - ); + unsafe { + gl.framebuffer_texture_2d( + fbo_target, + attachment, + target, + Some(raw), + view.mip_levels.start as i32, + ) + }; } } } @@ -133,14 +161,16 @@ impl super::Queue { instance_count, } => { if instance_count == 1 { - gl.draw_arrays(topology, start_vertex as i32, vertex_count as i32); + unsafe { gl.draw_arrays(topology, start_vertex as i32, vertex_count as i32) }; } else { - gl.draw_arrays_instanced( - topology, - start_vertex as i32, - vertex_count as i32, - instance_count as i32, - ); + unsafe { + gl.draw_arrays_instanced( + topology, + start_vertex as i32, + vertex_count as i32, + instance_count as i32, + ) + }; } } C::DrawIndexed { @@ -151,42 +181,50 @@ impl super::Queue { base_vertex, instance_count, } => match (base_vertex, instance_count) { - (0, 1) => gl.draw_elements( - topology, - index_count as i32, - index_type, - index_offset as i32, - ), - (0, _) => gl.draw_elements_instanced( - topology, - index_count as i32, - index_type, - index_offset as i32, - instance_count as i32, - ), - (_, 1) => gl.draw_elements_base_vertex( - topology, - index_count as i32, - index_type, - 
index_offset as i32, - base_vertex, - ), - (_, _) => gl.draw_elements_instanced_base_vertex( - topology, - index_count as _, - index_type, - index_offset as i32, - instance_count as i32, - base_vertex, - ), + (0, 1) => unsafe { + gl.draw_elements( + topology, + index_count as i32, + index_type, + index_offset as i32, + ) + }, + (0, _) => unsafe { + gl.draw_elements_instanced( + topology, + index_count as i32, + index_type, + index_offset as i32, + instance_count as i32, + ) + }, + (_, 1) => unsafe { + gl.draw_elements_base_vertex( + topology, + index_count as i32, + index_type, + index_offset as i32, + base_vertex, + ) + }, + (_, _) => unsafe { + gl.draw_elements_instanced_base_vertex( + topology, + index_count as _, + index_type, + index_offset as i32, + instance_count as i32, + base_vertex, + ) + }, }, C::DrawIndirect { topology, indirect_buf, indirect_offset, } => { - gl.bind_buffer(glow::DRAW_INDIRECT_BUFFER, Some(indirect_buf)); - gl.draw_arrays_indirect_offset(topology, indirect_offset as i32); + unsafe { gl.bind_buffer(glow::DRAW_INDIRECT_BUFFER, Some(indirect_buf)) }; + unsafe { gl.draw_arrays_indirect_offset(topology, indirect_offset as i32) }; } C::DrawIndexedIndirect { topology, @@ -194,18 +232,20 @@ impl super::Queue { indirect_buf, indirect_offset, } => { - gl.bind_buffer(glow::DRAW_INDIRECT_BUFFER, Some(indirect_buf)); - gl.draw_elements_indirect_offset(topology, index_type, indirect_offset as i32); + unsafe { gl.bind_buffer(glow::DRAW_INDIRECT_BUFFER, Some(indirect_buf)) }; + unsafe { + gl.draw_elements_indirect_offset(topology, index_type, indirect_offset as i32) + }; } C::Dispatch(group_counts) => { - gl.dispatch_compute(group_counts[0], group_counts[1], group_counts[2]); + unsafe { gl.dispatch_compute(group_counts[0], group_counts[1], group_counts[2]) }; } C::DispatchIndirect { indirect_buf, indirect_offset, } => { - gl.bind_buffer(glow::DISPATCH_INDIRECT_BUFFER, Some(indirect_buf)); - gl.dispatch_compute_indirect(indirect_offset as i32); + unsafe 
{ gl.bind_buffer(glow::DISPATCH_INDIRECT_BUFFER, Some(indirect_buf)) }; + unsafe { gl.dispatch_compute_indirect(indirect_offset as i32) }; } C::ClearBuffer { ref dst, @@ -228,24 +268,28 @@ impl super::Queue { || dst_target != glow::ELEMENT_ARRAY_BUFFER; if can_use_zero_buffer { - gl.bind_buffer(glow::COPY_READ_BUFFER, Some(self.zero_buffer)); - gl.bind_buffer(dst_target, Some(buffer)); + unsafe { gl.bind_buffer(glow::COPY_READ_BUFFER, Some(self.zero_buffer)) }; + unsafe { gl.bind_buffer(dst_target, Some(buffer)) }; let mut dst_offset = range.start; while dst_offset < range.end { let size = (range.end - dst_offset).min(super::ZERO_BUFFER_SIZE as u64); - gl.copy_buffer_sub_data( - glow::COPY_READ_BUFFER, - dst_target, - 0, - dst_offset as i32, - size as i32, - ); + unsafe { + gl.copy_buffer_sub_data( + glow::COPY_READ_BUFFER, + dst_target, + 0, + dst_offset as i32, + size as i32, + ) + }; dst_offset += size; } } else { - gl.bind_buffer(dst_target, Some(buffer)); + unsafe { gl.bind_buffer(dst_target, Some(buffer)) }; let zeroes = vec![0u8; (range.end - range.start) as usize]; - gl.buffer_sub_data_u8_slice(dst_target, range.start as i32, &zeroes); + unsafe { + gl.buffer_sub_data_u8_slice(dst_target, range.start as i32, &zeroes) + }; } } None => { @@ -278,49 +322,57 @@ impl super::Queue { let size = copy.size.get() as usize; match (src.raw, dst.raw) { (Some(ref src), Some(ref dst)) => { - gl.bind_buffer(copy_src_target, Some(*src)); - gl.bind_buffer(copy_dst_target, Some(*dst)); - gl.copy_buffer_sub_data( - copy_src_target, - copy_dst_target, - copy.src_offset as _, - copy.dst_offset as _, - copy.size.get() as _, - ); + unsafe { gl.bind_buffer(copy_src_target, Some(*src)) }; + unsafe { gl.bind_buffer(copy_dst_target, Some(*dst)) }; + unsafe { + gl.copy_buffer_sub_data( + copy_src_target, + copy_dst_target, + copy.src_offset as _, + copy.dst_offset as _, + copy.size.get() as _, + ) + }; } (Some(src), None) => { let mut data = dst.data.as_ref().unwrap().lock().unwrap(); 
let dst_data = &mut data.as_mut_slice() [copy.dst_offset as usize..copy.dst_offset as usize + size]; - gl.bind_buffer(copy_src_target, Some(src)); - self.shared.get_buffer_sub_data( - gl, - copy_src_target, - copy.src_offset as i32, - dst_data, - ); + unsafe { gl.bind_buffer(copy_src_target, Some(src)) }; + unsafe { + self.shared.get_buffer_sub_data( + gl, + copy_src_target, + copy.src_offset as i32, + dst_data, + ) + }; } (None, Some(dst)) => { let data = src.data.as_ref().unwrap().lock().unwrap(); let src_data = &data.as_slice() [copy.src_offset as usize..copy.src_offset as usize + size]; - gl.bind_buffer(copy_dst_target, Some(dst)); - gl.buffer_sub_data_u8_slice( - copy_dst_target, - copy.dst_offset as i32, - src_data, - ); + unsafe { gl.bind_buffer(copy_dst_target, Some(dst)) }; + unsafe { + gl.buffer_sub_data_u8_slice( + copy_dst_target, + copy.dst_offset as i32, + src_data, + ) + }; } (None, None) => { todo!() } } - gl.bind_buffer(copy_src_target, None); + unsafe { gl.bind_buffer(copy_src_target, None) }; if is_index_buffer_only_element_dst { - gl.bind_buffer(glow::ELEMENT_ARRAY_BUFFER, self.current_index_buffer); + unsafe { + gl.bind_buffer(glow::ELEMENT_ARRAY_BUFFER, self.current_index_buffer) + }; } else { - gl.bind_buffer(copy_dst_target, None); + unsafe { gl.bind_buffer(copy_dst_target, None) }; } } C::CopyTextureToTexture { @@ -332,61 +384,71 @@ impl super::Queue { ref copy, } => { //TODO: handle 3D copies - gl.bind_framebuffer(glow::READ_FRAMEBUFFER, Some(self.copy_fbo)); + unsafe { gl.bind_framebuffer(glow::READ_FRAMEBUFFER, Some(self.copy_fbo)) }; if is_layered_target(src_target) { //TODO: handle GLES without framebuffer_texture_3d - gl.framebuffer_texture_layer( - glow::READ_FRAMEBUFFER, - glow::COLOR_ATTACHMENT0, - Some(src), - copy.src_base.mip_level as i32, - copy.src_base.array_layer as i32, - ); + unsafe { + gl.framebuffer_texture_layer( + glow::READ_FRAMEBUFFER, + glow::COLOR_ATTACHMENT0, + Some(src), + copy.src_base.mip_level as i32, + 
copy.src_base.array_layer as i32, + ) + }; } else { - gl.framebuffer_texture_2d( - glow::READ_FRAMEBUFFER, - glow::COLOR_ATTACHMENT0, - src_target, - Some(src), - copy.src_base.mip_level as i32, - ); + unsafe { + gl.framebuffer_texture_2d( + glow::READ_FRAMEBUFFER, + glow::COLOR_ATTACHMENT0, + src_target, + Some(src), + copy.src_base.mip_level as i32, + ) + }; } - gl.bind_texture(dst_target, Some(dst)); + unsafe { gl.bind_texture(dst_target, Some(dst)) }; if dst_is_cubemap { - gl.copy_tex_sub_image_2d( - CUBEMAP_FACES[copy.dst_base.array_layer as usize], - copy.dst_base.mip_level as i32, - copy.dst_base.origin.x as i32, - copy.dst_base.origin.y as i32, - copy.src_base.origin.x as i32, - copy.src_base.origin.y as i32, - copy.size.width as i32, - copy.size.height as i32, - ); + unsafe { + gl.copy_tex_sub_image_2d( + CUBEMAP_FACES[copy.dst_base.array_layer as usize], + copy.dst_base.mip_level as i32, + copy.dst_base.origin.x as i32, + copy.dst_base.origin.y as i32, + copy.src_base.origin.x as i32, + copy.src_base.origin.y as i32, + copy.size.width as i32, + copy.size.height as i32, + ) + }; } else if is_layered_target(dst_target) { - gl.copy_tex_sub_image_3d( - dst_target, - copy.dst_base.mip_level as i32, - copy.dst_base.origin.x as i32, - copy.dst_base.origin.y as i32, - copy.dst_base.origin.z as i32, - copy.src_base.origin.x as i32, - copy.src_base.origin.y as i32, - copy.size.width as i32, - copy.size.height as i32, - ); + unsafe { + gl.copy_tex_sub_image_3d( + dst_target, + copy.dst_base.mip_level as i32, + copy.dst_base.origin.x as i32, + copy.dst_base.origin.y as i32, + copy.dst_base.origin.z as i32, + copy.src_base.origin.x as i32, + copy.src_base.origin.y as i32, + copy.size.width as i32, + copy.size.height as i32, + ) + }; } else { - gl.copy_tex_sub_image_2d( - dst_target, - copy.dst_base.mip_level as i32, - copy.dst_base.origin.x as i32, - copy.dst_base.origin.y as i32, - copy.src_base.origin.x as i32, - copy.src_base.origin.y as i32, - copy.size.width as 
i32, - copy.size.height as i32, - ); + unsafe { + gl.copy_tex_sub_image_2d( + dst_target, + copy.dst_base.mip_level as i32, + copy.dst_base.origin.x as i32, + copy.dst_base.origin.y as i32, + copy.src_base.origin.x as i32, + copy.src_base.origin.y as i32, + copy.size.width as i32, + copy.size.height as i32, + ) + }; } } C::CopyBufferToTexture { @@ -408,15 +470,15 @@ impl super::Queue { .rows_per_image .map_or(0, |rpi| format_info.block_dimensions.1 as u32 * rpi.get()); - gl.bind_texture(dst_target, Some(dst)); - gl.pixel_store_i32(glow::UNPACK_ROW_LENGTH, row_texels as i32); - gl.pixel_store_i32(glow::UNPACK_IMAGE_HEIGHT, column_texels as i32); + unsafe { gl.bind_texture(dst_target, Some(dst)) }; + unsafe { gl.pixel_store_i32(glow::UNPACK_ROW_LENGTH, row_texels as i32) }; + unsafe { gl.pixel_store_i32(glow::UNPACK_IMAGE_HEIGHT, column_texels as i32) }; let mut unbind_unpack_buffer = false; if !format_info.is_compressed() { let buffer_data; let unpack_data = match src.raw { Some(buffer) => { - gl.bind_buffer(glow::PIXEL_UNPACK_BUFFER, Some(buffer)); + unsafe { gl.bind_buffer(glow::PIXEL_UNPACK_BUFFER, Some(buffer)) }; unbind_unpack_buffer = true; glow::PixelUnpackData::BufferOffset(copy.buffer_layout.offset as u32) } @@ -429,76 +491,86 @@ impl super::Queue { }; match dst_target { glow::TEXTURE_3D => { - gl.tex_sub_image_3d( - dst_target, - copy.texture_base.mip_level as i32, - copy.texture_base.origin.x as i32, - copy.texture_base.origin.y as i32, - copy.texture_base.origin.z as i32, - copy.size.width as i32, - copy.size.height as i32, - copy.size.depth as i32, - format_desc.external, - format_desc.data_type, - unpack_data, - ); + unsafe { + gl.tex_sub_image_3d( + dst_target, + copy.texture_base.mip_level as i32, + copy.texture_base.origin.x as i32, + copy.texture_base.origin.y as i32, + copy.texture_base.origin.z as i32, + copy.size.width as i32, + copy.size.height as i32, + copy.size.depth as i32, + format_desc.external, + format_desc.data_type, + unpack_data, + ) 
+ }; } glow::TEXTURE_2D_ARRAY => { - gl.tex_sub_image_3d( - dst_target, - copy.texture_base.mip_level as i32, - copy.texture_base.origin.x as i32, - copy.texture_base.origin.y as i32, - copy.texture_base.array_layer as i32, - copy.size.width as i32, - copy.size.height as i32, - copy.size.depth as i32, - format_desc.external, - format_desc.data_type, - unpack_data, - ); + unsafe { + gl.tex_sub_image_3d( + dst_target, + copy.texture_base.mip_level as i32, + copy.texture_base.origin.x as i32, + copy.texture_base.origin.y as i32, + copy.texture_base.array_layer as i32, + copy.size.width as i32, + copy.size.height as i32, + copy.size.depth as i32, + format_desc.external, + format_desc.data_type, + unpack_data, + ) + }; } glow::TEXTURE_2D => { - gl.tex_sub_image_2d( - dst_target, - copy.texture_base.mip_level as i32, - copy.texture_base.origin.x as i32, - copy.texture_base.origin.y as i32, - copy.size.width as i32, - copy.size.height as i32, - format_desc.external, - format_desc.data_type, - unpack_data, - ); + unsafe { + gl.tex_sub_image_2d( + dst_target, + copy.texture_base.mip_level as i32, + copy.texture_base.origin.x as i32, + copy.texture_base.origin.y as i32, + copy.size.width as i32, + copy.size.height as i32, + format_desc.external, + format_desc.data_type, + unpack_data, + ) + }; } glow::TEXTURE_CUBE_MAP => { - gl.tex_sub_image_2d( - CUBEMAP_FACES[copy.texture_base.array_layer as usize], - copy.texture_base.mip_level as i32, - copy.texture_base.origin.x as i32, - copy.texture_base.origin.y as i32, - copy.size.width as i32, - copy.size.height as i32, - format_desc.external, - format_desc.data_type, - unpack_data, - ); + unsafe { + gl.tex_sub_image_2d( + CUBEMAP_FACES[copy.texture_base.array_layer as usize], + copy.texture_base.mip_level as i32, + copy.texture_base.origin.x as i32, + copy.texture_base.origin.y as i32, + copy.size.width as i32, + copy.size.height as i32, + format_desc.external, + format_desc.data_type, + unpack_data, + ) + }; } 
glow::TEXTURE_CUBE_MAP_ARRAY => { //Note: not sure if this is correct! - gl.tex_sub_image_3d( - dst_target, - copy.texture_base.mip_level as i32, - copy.texture_base.origin.x as i32, - copy.texture_base.origin.y as i32, - copy.texture_base.origin.z as i32, - copy.size.width as i32, - copy.size.height as i32, - copy.size.depth as i32, - format_desc.external, - format_desc.data_type, - unpack_data, - ); + unsafe { + gl.tex_sub_image_3d( + dst_target, + copy.texture_base.mip_level as i32, + copy.texture_base.origin.x as i32, + copy.texture_base.origin.y as i32, + copy.texture_base.origin.z as i32, + copy.size.width as i32, + copy.size.height as i32, + copy.size.depth as i32, + format_desc.external, + format_desc.data_type, + unpack_data, + ) + }; } _ => unreachable!(), } @@ -526,7 +598,7 @@ impl super::Queue { let buffer_data; let unpack_data = match src.raw { Some(buffer) => { - gl.bind_buffer(glow::PIXEL_UNPACK_BUFFER, Some(buffer)); + unsafe { gl.bind_buffer(glow::PIXEL_UNPACK_BUFFER, Some(buffer)) }; unbind_unpack_buffer = true; glow::CompressedPixelUnpackData::BufferRange( offset..offset + bytes_in_upload, @@ -544,48 +616,54 @@ impl super::Queue { glow::TEXTURE_3D | glow::TEXTURE_CUBE_MAP_ARRAY | glow::TEXTURE_2D_ARRAY => { - gl.compressed_tex_sub_image_3d( - dst_target, - copy.texture_base.mip_level as i32, - copy.texture_base.origin.x as i32, - copy.texture_base.origin.y as i32, - copy.texture_base.origin.z as i32, - copy.size.width as i32, - copy.size.height as i32, - copy.size.depth as i32, - format_desc.internal, - unpack_data, - ); + unsafe { + gl.compressed_tex_sub_image_3d( + dst_target, + copy.texture_base.mip_level as i32, + copy.texture_base.origin.x as i32, + copy.texture_base.origin.y as i32, + copy.texture_base.origin.z as i32, + copy.size.width as i32, + copy.size.height as i32, + copy.size.depth as i32, + format_desc.internal, + unpack_data, + ) + }; } glow::TEXTURE_2D => { - gl.compressed_tex_sub_image_2d( - dst_target, - 
copy.texture_base.mip_level as i32, - copy.texture_base.origin.x as i32, - copy.texture_base.origin.y as i32, - copy.size.width as i32, - copy.size.height as i32, - format_desc.internal, - unpack_data, - ); + unsafe { + gl.compressed_tex_sub_image_2d( + dst_target, + copy.texture_base.mip_level as i32, + copy.texture_base.origin.x as i32, + copy.texture_base.origin.y as i32, + copy.size.width as i32, + copy.size.height as i32, + format_desc.internal, + unpack_data, + ) + }; } glow::TEXTURE_CUBE_MAP => { - gl.compressed_tex_sub_image_2d( - CUBEMAP_FACES[copy.texture_base.array_layer as usize], - copy.texture_base.mip_level as i32, - copy.texture_base.origin.x as i32, - copy.texture_base.origin.y as i32, - copy.size.width as i32, - copy.size.height as i32, - format_desc.internal, - unpack_data, - ); + unsafe { + gl.compressed_tex_sub_image_2d( + CUBEMAP_FACES[copy.texture_base.array_layer as usize], + copy.texture_base.mip_level as i32, + copy.texture_base.origin.x as i32, + copy.texture_base.origin.y as i32, + copy.size.width as i32, + copy.size.height as i32, + format_desc.internal, + unpack_data, + ) + }; } _ => unreachable!(), } } if unbind_unpack_buffer { - gl.bind_buffer(glow::PIXEL_UNPACK_BUFFER, None); + unsafe { gl.bind_buffer(glow::PIXEL_UNPACK_BUFFER, None) }; } } C::CopyTextureToBuffer { @@ -615,31 +693,35 @@ impl super::Queue { bpr.get() / format_info.block_size as u32 }); - gl.bind_framebuffer(glow::READ_FRAMEBUFFER, Some(self.copy_fbo)); + unsafe { gl.bind_framebuffer(glow::READ_FRAMEBUFFER, Some(self.copy_fbo)) }; //TODO: handle cubemap copies if is_layered_target(src_target) { //TODO: handle GLES without framebuffer_texture_3d - gl.framebuffer_texture_layer( - glow::READ_FRAMEBUFFER, - glow::COLOR_ATTACHMENT0, - Some(src), - copy.texture_base.mip_level as i32, - copy.texture_base.array_layer as i32, - ); + unsafe { + gl.framebuffer_texture_layer( + glow::READ_FRAMEBUFFER, + glow::COLOR_ATTACHMENT0, + Some(src), + copy.texture_base.mip_level as i32, + 
copy.texture_base.array_layer as i32, + ) + }; } else { - gl.framebuffer_texture_2d( - glow::READ_FRAMEBUFFER, - glow::COLOR_ATTACHMENT0, - src_target, - Some(src), - copy.texture_base.mip_level as i32, - ); + unsafe { + gl.framebuffer_texture_2d( + glow::READ_FRAMEBUFFER, + glow::COLOR_ATTACHMENT0, + src_target, + Some(src), + copy.texture_base.mip_level as i32, + ) + }; } let mut buffer_data; let unpack_data = match dst.raw { Some(buffer) => { - gl.pixel_store_i32(glow::PACK_ROW_LENGTH, row_texels as i32); - gl.bind_buffer(glow::PIXEL_PACK_BUFFER, Some(buffer)); + unsafe { gl.pixel_store_i32(glow::PACK_ROW_LENGTH, row_texels as i32) }; + unsafe { gl.bind_buffer(glow::PIXEL_PACK_BUFFER, Some(buffer)) }; glow::PixelPackData::BufferOffset(copy.buffer_layout.offset as u32) } None => { @@ -649,25 +731,27 @@ impl super::Queue { glow::PixelPackData::Slice(dst_data) } }; - gl.read_pixels( - copy.texture_base.origin.x as i32, - copy.texture_base.origin.y as i32, - copy.size.width as i32, - copy.size.height as i32, - format_desc.external, - format_desc.data_type, - unpack_data, - ); + unsafe { + gl.read_pixels( + copy.texture_base.origin.x as i32, + copy.texture_base.origin.y as i32, + copy.size.width as i32, + copy.size.height as i32, + format_desc.external, + format_desc.data_type, + unpack_data, + ) + }; } C::SetIndexBuffer(buffer) => { - gl.bind_buffer(glow::ELEMENT_ARRAY_BUFFER, Some(buffer)); + unsafe { gl.bind_buffer(glow::ELEMENT_ARRAY_BUFFER, Some(buffer)) }; self.current_index_buffer = Some(buffer); } C::BeginQuery(query, target) => { - gl.begin_query(target, query); + unsafe { gl.begin_query(target, query) }; } C::EndQuery(target) => { - gl.end_query(target); + unsafe { gl.end_query(target) }; } C::CopyQueryResults { ref query_range, @@ -677,17 +761,21 @@ impl super::Queue { } => { self.temp_query_results.clear(); for &query in queries[query_range.start as usize..query_range.end as usize].iter() { - let result = gl.get_query_parameter_u32(query, 
glow::QUERY_RESULT); + let result = unsafe { gl.get_query_parameter_u32(query, glow::QUERY_RESULT) }; self.temp_query_results.push(result as u64); } - let query_data = slice::from_raw_parts( - self.temp_query_results.as_ptr() as *const u8, - self.temp_query_results.len() * mem::size_of::(), - ); + let query_data = unsafe { + slice::from_raw_parts( + self.temp_query_results.as_ptr() as *const u8, + self.temp_query_results.len() * mem::size_of::(), + ) + }; match dst.raw { Some(buffer) => { - gl.bind_buffer(dst_target, Some(buffer)); - gl.buffer_sub_data_u8_slice(dst_target, dst_offset as i32, query_data); + unsafe { gl.bind_buffer(dst_target, Some(buffer)) }; + unsafe { + gl.buffer_sub_data_u8_slice(dst_target, dst_offset as i32, query_data) + }; } None => { let data = &mut dst.data.as_ref().unwrap().lock().unwrap(); @@ -698,73 +786,81 @@ impl super::Queue { } C::ResetFramebuffer { is_default } => { if is_default { - gl.bind_framebuffer(glow::DRAW_FRAMEBUFFER, None); + unsafe { gl.bind_framebuffer(glow::DRAW_FRAMEBUFFER, None) }; } else { - gl.bind_framebuffer(glow::DRAW_FRAMEBUFFER, Some(self.draw_fbo)); - gl.framebuffer_texture_2d( - glow::DRAW_FRAMEBUFFER, - glow::DEPTH_STENCIL_ATTACHMENT, - glow::TEXTURE_2D, - None, - 0, - ); - for i in 0..crate::MAX_COLOR_ATTACHMENTS { - let target = glow::COLOR_ATTACHMENT0 + i as u32; + unsafe { gl.bind_framebuffer(glow::DRAW_FRAMEBUFFER, Some(self.draw_fbo)) }; + unsafe { gl.framebuffer_texture_2d( glow::DRAW_FRAMEBUFFER, - target, + glow::DEPTH_STENCIL_ATTACHMENT, glow::TEXTURE_2D, None, 0, - ); + ) + }; + for i in 0..crate::MAX_COLOR_ATTACHMENTS { + let target = glow::COLOR_ATTACHMENT0 + i as u32; + unsafe { + gl.framebuffer_texture_2d( + glow::DRAW_FRAMEBUFFER, + target, + glow::TEXTURE_2D, + None, + 0, + ) + }; } } - gl.color_mask(true, true, true, true); - gl.depth_mask(true); - gl.stencil_mask(!0); - gl.disable(glow::DEPTH_TEST); - gl.disable(glow::STENCIL_TEST); - gl.disable(glow::SCISSOR_TEST); + unsafe { 
gl.color_mask(true, true, true, true) }; + unsafe { gl.depth_mask(true) }; + unsafe { gl.stencil_mask(!0) }; + unsafe { gl.disable(glow::DEPTH_TEST) }; + unsafe { gl.disable(glow::STENCIL_TEST) }; + unsafe { gl.disable(glow::SCISSOR_TEST) }; } C::BindAttachment { attachment, ref view, } => { - self.set_attachment(gl, glow::DRAW_FRAMEBUFFER, attachment, view); + unsafe { self.set_attachment(gl, glow::DRAW_FRAMEBUFFER, attachment, view) }; } C::ResolveAttachment { attachment, ref dst, ref size, } => { - gl.bind_framebuffer(glow::READ_FRAMEBUFFER, Some(self.draw_fbo)); - gl.read_buffer(attachment); - gl.bind_framebuffer(glow::DRAW_FRAMEBUFFER, Some(self.copy_fbo)); - self.set_attachment(gl, glow::DRAW_FRAMEBUFFER, glow::COLOR_ATTACHMENT0, dst); - gl.blit_framebuffer( - 0, - 0, - size.width as i32, - size.height as i32, - 0, - 0, - size.width as i32, - size.height as i32, - glow::COLOR_BUFFER_BIT, - glow::NEAREST, - ); - gl.bind_framebuffer(glow::READ_FRAMEBUFFER, None); - gl.bind_framebuffer(glow::DRAW_FRAMEBUFFER, Some(self.draw_fbo)); + unsafe { gl.bind_framebuffer(glow::READ_FRAMEBUFFER, Some(self.draw_fbo)) }; + unsafe { gl.read_buffer(attachment) }; + unsafe { gl.bind_framebuffer(glow::DRAW_FRAMEBUFFER, Some(self.copy_fbo)) }; + unsafe { + self.set_attachment(gl, glow::DRAW_FRAMEBUFFER, glow::COLOR_ATTACHMENT0, dst) + }; + unsafe { + gl.blit_framebuffer( + 0, + 0, + size.width as i32, + size.height as i32, + 0, + 0, + size.width as i32, + size.height as i32, + glow::COLOR_BUFFER_BIT, + glow::NEAREST, + ) + }; + unsafe { gl.bind_framebuffer(glow::READ_FRAMEBUFFER, None) }; + unsafe { gl.bind_framebuffer(glow::DRAW_FRAMEBUFFER, Some(self.draw_fbo)) }; } C::InvalidateAttachments(ref list) => { - gl.invalidate_framebuffer(glow::DRAW_FRAMEBUFFER, list); + unsafe { gl.invalidate_framebuffer(glow::DRAW_FRAMEBUFFER, list) }; } C::SetDrawColorBuffers(count) => { self.draw_buffer_count = count; let indices = (0..count as u32) .map(|i| glow::COLOR_ATTACHMENT0 + i) 
.collect::>(); - gl.draw_buffers(&indices); + unsafe { gl.draw_buffers(&indices) }; if self .shared @@ -772,7 +868,7 @@ impl super::Queue { .contains(super::PrivateCapabilities::CAN_DISABLE_DRAW_BUFFER) { for draw_buffer in 0..count as u32 { - gl.disable_draw_buffer(glow::BLEND, draw_buffer); + unsafe { gl.disable_draw_buffer(glow::BLEND, draw_buffer) }; } } } @@ -787,51 +883,58 @@ impl super::Queue { .contains(super::Workarounds::MESA_I915_SRGB_SHADER_CLEAR) && is_srgb { - self.perform_shader_clear(gl, draw_buffer, *color); + unsafe { self.perform_shader_clear(gl, draw_buffer, *color) }; } else { - gl.clear_buffer_f32_slice(glow::COLOR, draw_buffer, color); + unsafe { gl.clear_buffer_f32_slice(glow::COLOR, draw_buffer, color) }; } } C::ClearColorU(draw_buffer, ref color) => { - gl.clear_buffer_u32_slice(glow::COLOR, draw_buffer, color); + unsafe { gl.clear_buffer_u32_slice(glow::COLOR, draw_buffer, color) }; } C::ClearColorI(draw_buffer, ref color) => { - gl.clear_buffer_i32_slice(glow::COLOR, draw_buffer, color); + unsafe { gl.clear_buffer_i32_slice(glow::COLOR, draw_buffer, color) }; } C::ClearDepth(depth) => { - gl.clear_buffer_f32_slice(glow::DEPTH, 0, &[depth]); + unsafe { gl.clear_buffer_f32_slice(glow::DEPTH, 0, &[depth]) }; } C::ClearStencil(value) => { - gl.clear_buffer_i32_slice(glow::STENCIL, 0, &[value as i32]); + unsafe { gl.clear_buffer_i32_slice(glow::STENCIL, 0, &[value as i32]) }; } C::ClearDepthAndStencil(depth, stencil_value) => { - gl.clear_buffer_depth_stencil(glow::DEPTH_STENCIL, 0, depth, stencil_value as i32); + unsafe { + gl.clear_buffer_depth_stencil( + glow::DEPTH_STENCIL, + 0, + depth, + stencil_value as i32, + ) + }; } C::BufferBarrier(raw, usage) => { let mut flags = 0; if usage.contains(crate::BufferUses::VERTEX) { flags |= glow::VERTEX_ATTRIB_ARRAY_BARRIER_BIT; - gl.bind_buffer(glow::ARRAY_BUFFER, Some(raw)); - gl.vertex_attrib_pointer_f32(0, 1, glow::BYTE, true, 0, 0); + unsafe { gl.bind_buffer(glow::ARRAY_BUFFER, Some(raw)) }; + 
unsafe { gl.vertex_attrib_pointer_f32(0, 1, glow::BYTE, true, 0, 0) }; } if usage.contains(crate::BufferUses::INDEX) { flags |= glow::ELEMENT_ARRAY_BARRIER_BIT; - gl.bind_buffer(glow::ELEMENT_ARRAY_BUFFER, Some(raw)); + unsafe { gl.bind_buffer(glow::ELEMENT_ARRAY_BUFFER, Some(raw)) }; } if usage.contains(crate::BufferUses::UNIFORM) { flags |= glow::UNIFORM_BARRIER_BIT; } if usage.contains(crate::BufferUses::INDIRECT) { flags |= glow::COMMAND_BARRIER_BIT; - gl.bind_buffer(glow::DRAW_INDIRECT_BUFFER, Some(raw)); + unsafe { gl.bind_buffer(glow::DRAW_INDIRECT_BUFFER, Some(raw)) }; } if usage.contains(crate::BufferUses::COPY_SRC) { flags |= glow::PIXEL_BUFFER_BARRIER_BIT; - gl.bind_buffer(glow::PIXEL_UNPACK_BUFFER, Some(raw)); + unsafe { gl.bind_buffer(glow::PIXEL_UNPACK_BUFFER, Some(raw)) }; } if usage.contains(crate::BufferUses::COPY_DST) { flags |= glow::PIXEL_BUFFER_BARRIER_BIT; - gl.bind_buffer(glow::PIXEL_PACK_BUFFER, Some(raw)); + unsafe { gl.bind_buffer(glow::PIXEL_PACK_BUFFER, Some(raw)) }; } if usage.intersects(crate::BufferUses::MAP_READ | crate::BufferUses::MAP_WRITE) { flags |= glow::BUFFER_UPDATE_BARRIER_BIT; @@ -841,7 +944,7 @@ impl super::Queue { ) { flags |= glow::SHADER_STORAGE_BARRIER_BIT; } - gl.memory_barrier(flags); + unsafe { gl.memory_barrier(flags) }; } C::TextureBarrier(usage) => { let mut flags = 0; @@ -863,18 +966,18 @@ impl super::Queue { ) { flags |= glow::FRAMEBUFFER_BARRIER_BIT; } - gl.memory_barrier(flags); + unsafe { gl.memory_barrier(flags) }; } C::SetViewport { ref rect, ref depth, } => { - gl.viewport(rect.x, rect.y, rect.w, rect.h); - gl.depth_range_f32(depth.start, depth.end); + unsafe { gl.viewport(rect.x, rect.y, rect.w, rect.h) }; + unsafe { gl.depth_range_f32(depth.start, depth.end) }; } C::SetScissor(ref rect) => { - gl.scissor(rect.x, rect.y, rect.w, rect.h); - gl.enable(glow::SCISSOR_TEST); + unsafe { gl.scissor(rect.x, rect.y, rect.w, rect.h) }; + unsafe { gl.enable(glow::SCISSOR_TEST) }; } C::SetStencilFunc { face, @@ 
-882,127 +985,144 @@ impl super::Queue { reference, read_mask, } => { - gl.stencil_func_separate(face, function, reference as i32, read_mask); + unsafe { gl.stencil_func_separate(face, function, reference as i32, read_mask) }; } C::SetStencilOps { face, write_mask, ref ops, } => { - gl.stencil_mask_separate(face, write_mask); - gl.stencil_op_separate(face, ops.fail, ops.depth_fail, ops.pass); + unsafe { gl.stencil_mask_separate(face, write_mask) }; + unsafe { gl.stencil_op_separate(face, ops.fail, ops.depth_fail, ops.pass) }; } C::SetVertexAttribute { buffer, ref buffer_desc, attribute_desc: ref vat, } => { - gl.bind_buffer(glow::ARRAY_BUFFER, buffer); - gl.enable_vertex_attrib_array(vat.location); + unsafe { gl.bind_buffer(glow::ARRAY_BUFFER, buffer) }; + unsafe { gl.enable_vertex_attrib_array(vat.location) }; if buffer.is_none() { match vat.format_desc.attrib_kind { - super::VertexAttribKind::Float => gl.vertex_attrib_format_f32( - vat.location, - vat.format_desc.element_count, - vat.format_desc.element_format, - true, // always normalized - vat.offset, - ), - super::VertexAttribKind::Integer => gl.vertex_attrib_format_i32( - vat.location, - vat.format_desc.element_count, - vat.format_desc.element_format, - vat.offset, - ), + super::VertexAttribKind::Float => unsafe { + gl.vertex_attrib_format_f32( + vat.location, + vat.format_desc.element_count, + vat.format_desc.element_format, + true, // always normalized + vat.offset, + ) + }, + super::VertexAttribKind::Integer => unsafe { + gl.vertex_attrib_format_i32( + vat.location, + vat.format_desc.element_count, + vat.format_desc.element_format, + vat.offset, + ) + }, } //Note: there is apparently a bug on AMD 3500U: // this call is ignored if the current array is disabled. 
- gl.vertex_attrib_binding(vat.location, vat.buffer_index); + unsafe { gl.vertex_attrib_binding(vat.location, vat.buffer_index) }; } else { match vat.format_desc.attrib_kind { - super::VertexAttribKind::Float => gl.vertex_attrib_pointer_f32( - vat.location, - vat.format_desc.element_count, - vat.format_desc.element_format, - true, // always normalized - buffer_desc.stride as i32, - vat.offset as i32, - ), - super::VertexAttribKind::Integer => gl.vertex_attrib_pointer_i32( - vat.location, - vat.format_desc.element_count, - vat.format_desc.element_format, - buffer_desc.stride as i32, - vat.offset as i32, - ), + super::VertexAttribKind::Float => unsafe { + gl.vertex_attrib_pointer_f32( + vat.location, + vat.format_desc.element_count, + vat.format_desc.element_format, + true, // always normalized + buffer_desc.stride as i32, + vat.offset as i32, + ) + }, + super::VertexAttribKind::Integer => unsafe { + gl.vertex_attrib_pointer_i32( + vat.location, + vat.format_desc.element_count, + vat.format_desc.element_format, + buffer_desc.stride as i32, + vat.offset as i32, + ) + }, } - gl.vertex_attrib_divisor(vat.location, buffer_desc.step as u32); + unsafe { gl.vertex_attrib_divisor(vat.location, buffer_desc.step as u32) }; } } C::UnsetVertexAttribute(location) => { - gl.disable_vertex_attrib_array(location); + unsafe { gl.disable_vertex_attrib_array(location) }; } C::SetVertexBuffer { index, ref buffer, ref buffer_desc, } => { - gl.vertex_binding_divisor(index, buffer_desc.step as u32); - gl.bind_vertex_buffer( - index, - Some(buffer.raw), - buffer.offset as i32, - buffer_desc.stride as i32, - ); + unsafe { gl.vertex_binding_divisor(index, buffer_desc.step as u32) }; + unsafe { + gl.bind_vertex_buffer( + index, + Some(buffer.raw), + buffer.offset as i32, + buffer_desc.stride as i32, + ) + }; } C::SetDepth(ref depth) => { - gl.depth_func(depth.function); - gl.depth_mask(depth.mask); + unsafe { gl.depth_func(depth.function) }; + unsafe { gl.depth_mask(depth.mask) }; } 
C::SetDepthBias(bias) => { if bias.is_enabled() { - gl.enable(glow::POLYGON_OFFSET_FILL); - gl.polygon_offset(bias.constant as f32, bias.slope_scale); + unsafe { gl.enable(glow::POLYGON_OFFSET_FILL) }; + unsafe { gl.polygon_offset(bias.constant as f32, bias.slope_scale) }; } else { - gl.disable(glow::POLYGON_OFFSET_FILL); + unsafe { gl.disable(glow::POLYGON_OFFSET_FILL) }; } } C::ConfigureDepthStencil(aspects) => { if aspects.contains(crate::FormatAspects::DEPTH) { - gl.enable(glow::DEPTH_TEST); + unsafe { gl.enable(glow::DEPTH_TEST) }; } else { - gl.disable(glow::DEPTH_TEST); + unsafe { gl.disable(glow::DEPTH_TEST) }; } if aspects.contains(crate::FormatAspects::STENCIL) { - gl.enable(glow::STENCIL_TEST); + unsafe { gl.enable(glow::STENCIL_TEST) }; + } else { + unsafe { gl.disable(glow::STENCIL_TEST) }; + } + } + C::SetAlphaToCoverage(enabled) => { + if enabled { + unsafe { gl.enable(glow::SAMPLE_ALPHA_TO_COVERAGE) }; } else { - gl.disable(glow::STENCIL_TEST); + unsafe { gl.disable(glow::SAMPLE_ALPHA_TO_COVERAGE) }; } } C::SetProgram(program) => { - gl.use_program(Some(program)); + unsafe { gl.use_program(Some(program)) }; } C::SetPrimitive(ref state) => { - gl.front_face(state.front_face); + unsafe { gl.front_face(state.front_face) }; if state.cull_face != 0 { - gl.enable(glow::CULL_FACE); - gl.cull_face(state.cull_face); + unsafe { gl.enable(glow::CULL_FACE) }; + unsafe { gl.cull_face(state.cull_face) }; } else { - gl.disable(glow::CULL_FACE); + unsafe { gl.disable(glow::CULL_FACE) }; } if self.features.contains(wgt::Features::DEPTH_CLIP_CONTROL) { //Note: this is a bit tricky, since we are controlling the clip, not the clamp. 
if state.unclipped_depth { - gl.enable(glow::DEPTH_CLAMP); + unsafe { gl.enable(glow::DEPTH_CLAMP) }; } else { - gl.disable(glow::DEPTH_CLAMP); + unsafe { gl.disable(glow::DEPTH_CLAMP) }; } } } C::SetBlendConstant(c) => { - gl.blend_color(c[0], c[1], c[2], c[3]); + unsafe { gl.blend_color(c[0], c[1], c[2], c[3]) }; } C::SetColorTarget { draw_buffer_index, @@ -1010,62 +1130,79 @@ impl super::Queue { } => { use wgt::ColorWrites as Cw; if let Some(index) = draw_buffer_index { - gl.color_mask_draw_buffer( - index, - mask.contains(Cw::RED), - mask.contains(Cw::GREEN), - mask.contains(Cw::BLUE), - mask.contains(Cw::ALPHA), - ); + unsafe { + gl.color_mask_draw_buffer( + index, + mask.contains(Cw::RED), + mask.contains(Cw::GREEN), + mask.contains(Cw::BLUE), + mask.contains(Cw::ALPHA), + ) + }; if let Some(ref blend) = *blend { - gl.enable_draw_buffer(index, glow::BLEND); + unsafe { gl.enable_draw_buffer(index, glow::BLEND) }; if blend.color != blend.alpha { - gl.blend_equation_separate_draw_buffer( - index, - blend.color.equation, - blend.alpha.equation, - ); - gl.blend_func_separate_draw_buffer( - index, - blend.color.src, - blend.color.dst, - blend.alpha.src, - blend.alpha.dst, - ); + unsafe { + gl.blend_equation_separate_draw_buffer( + index, + blend.color.equation, + blend.alpha.equation, + ) + }; + unsafe { + gl.blend_func_separate_draw_buffer( + index, + blend.color.src, + blend.color.dst, + blend.alpha.src, + blend.alpha.dst, + ) + }; } else { - gl.blend_equation_draw_buffer(index, blend.color.equation); - gl.blend_func_draw_buffer(index, blend.color.src, blend.color.dst); + unsafe { gl.blend_equation_draw_buffer(index, blend.color.equation) }; + unsafe { + gl.blend_func_draw_buffer(index, blend.color.src, blend.color.dst) + }; } } else if self .shared .private_caps .contains(super::PrivateCapabilities::CAN_DISABLE_DRAW_BUFFER) { - gl.disable_draw_buffer(index, glow::BLEND); + unsafe { gl.disable_draw_buffer(index, glow::BLEND) }; } } else { - gl.color_mask( - 
mask.contains(Cw::RED), - mask.contains(Cw::GREEN), - mask.contains(Cw::BLUE), - mask.contains(Cw::ALPHA), - ); + unsafe { + gl.color_mask( + mask.contains(Cw::RED), + mask.contains(Cw::GREEN), + mask.contains(Cw::BLUE), + mask.contains(Cw::ALPHA), + ) + }; if let Some(ref blend) = *blend { - gl.enable(glow::BLEND); + unsafe { gl.enable(glow::BLEND) }; if blend.color != blend.alpha { - gl.blend_equation_separate(blend.color.equation, blend.alpha.equation); - gl.blend_func_separate( - blend.color.src, - blend.color.dst, - blend.alpha.src, - blend.alpha.dst, - ); + unsafe { + gl.blend_equation_separate( + blend.color.equation, + blend.alpha.equation, + ) + }; + unsafe { + gl.blend_func_separate( + blend.color.src, + blend.color.dst, + blend.alpha.src, + blend.alpha.dst, + ) + }; } else { - gl.blend_equation(blend.color.equation); - gl.blend_func(blend.color.src, blend.color.dst); + unsafe { gl.blend_equation(blend.color.equation) }; + unsafe { gl.blend_func(blend.color.src, blend.color.dst) }; } } else { - gl.disable(glow::BLEND); + unsafe { gl.disable(glow::BLEND) }; } } } @@ -1076,40 +1213,44 @@ impl super::Queue { offset, size, } => { - gl.bind_buffer_range(target, slot, Some(buffer), offset, size); + unsafe { gl.bind_buffer_range(target, slot, Some(buffer), offset, size) }; } C::BindSampler(texture_index, sampler) => { - gl.bind_sampler(texture_index, sampler); + unsafe { gl.bind_sampler(texture_index, sampler) }; } C::BindTexture { slot, texture, target, } => { - gl.active_texture(glow::TEXTURE0 + slot); - gl.bind_texture(target, Some(texture)); + unsafe { gl.active_texture(glow::TEXTURE0 + slot) }; + unsafe { gl.bind_texture(target, Some(texture)) }; } C::BindImage { slot, ref binding } => { - gl.bind_image_texture( - slot, - binding.raw, - binding.mip_level as i32, - binding.array_layer.is_none(), - binding.array_layer.unwrap_or_default() as i32, - binding.access, - binding.format, - ); + unsafe { + gl.bind_image_texture( + slot, + binding.raw, + 
binding.mip_level as i32, + binding.array_layer.is_none(), + binding.array_layer.unwrap_or_default() as i32, + binding.access, + binding.format, + ) + }; } #[cfg(not(target_arch = "wasm32"))] C::InsertDebugMarker(ref range) => { let marker = extract_marker(data_bytes, range); - gl.debug_message_insert( - glow::DEBUG_SOURCE_APPLICATION, - glow::DEBUG_TYPE_MARKER, - DEBUG_ID, - glow::DEBUG_SEVERITY_NOTIFICATION, - marker, - ); + unsafe { + gl.debug_message_insert( + glow::DEBUG_SOURCE_APPLICATION, + glow::DEBUG_TYPE_MARKER, + DEBUG_ID, + glow::DEBUG_SEVERITY_NOTIFICATION, + marker, + ) + }; } #[cfg(target_arch = "wasm32")] C::InsertDebugMarker(_) => (), @@ -1118,11 +1259,15 @@ impl super::Queue { #[cfg(not(target_arch = "wasm32"))] let marker = extract_marker(data_bytes, range); #[cfg(not(target_arch = "wasm32"))] - gl.push_debug_group(glow::DEBUG_SOURCE_APPLICATION, DEBUG_ID, marker); + unsafe { + gl.push_debug_group(glow::DEBUG_SOURCE_APPLICATION, DEBUG_ID, marker) + }; } C::PopDebugGroup => { #[cfg(not(target_arch = "wasm32"))] - gl.pop_debug_group(); + unsafe { + gl.pop_debug_group() + }; } C::SetPushConstants { ref uniform, @@ -1143,47 +1288,47 @@ impl super::Queue { match uniform.utype { glow::FLOAT => { let data = get_data::(data_bytes, offset)[0]; - gl.uniform_1_f32(location, data); + unsafe { gl.uniform_1_f32(location, data) }; } glow::FLOAT_VEC2 => { let data = get_data::<[f32; 2]>(data_bytes, offset)[0]; - gl.uniform_2_f32_slice(location, &data); + unsafe { gl.uniform_2_f32_slice(location, &data) }; } glow::FLOAT_VEC3 => { let data = get_data::<[f32; 3]>(data_bytes, offset)[0]; - gl.uniform_3_f32_slice(location, &data); + unsafe { gl.uniform_3_f32_slice(location, &data) }; } glow::FLOAT_VEC4 => { let data = get_data::<[f32; 4]>(data_bytes, offset)[0]; - gl.uniform_4_f32_slice(location, &data); + unsafe { gl.uniform_4_f32_slice(location, &data) }; } glow::INT => { let data = get_data::(data_bytes, offset)[0]; - gl.uniform_1_i32(location, data); + unsafe { 
gl.uniform_1_i32(location, data) }; } glow::INT_VEC2 => { let data = get_data::<[i32; 2]>(data_bytes, offset)[0]; - gl.uniform_2_i32_slice(location, &data); + unsafe { gl.uniform_2_i32_slice(location, &data) }; } glow::INT_VEC3 => { let data = get_data::<[i32; 3]>(data_bytes, offset)[0]; - gl.uniform_3_i32_slice(location, &data); + unsafe { gl.uniform_3_i32_slice(location, &data) }; } glow::INT_VEC4 => { let data = get_data::<[i32; 4]>(data_bytes, offset)[0]; - gl.uniform_4_i32_slice(location, &data); + unsafe { gl.uniform_4_i32_slice(location, &data) }; } glow::FLOAT_MAT2 => { let data = get_data::<[f32; 4]>(data_bytes, offset)[0]; - gl.uniform_matrix_2_f32_slice(location, false, &data); + unsafe { gl.uniform_matrix_2_f32_slice(location, false, &data) }; } glow::FLOAT_MAT3 => { let data = get_data::<[f32; 9]>(data_bytes, offset)[0]; - gl.uniform_matrix_3_f32_slice(location, false, &data); + unsafe { gl.uniform_matrix_3_f32_slice(location, false, &data) }; } glow::FLOAT_MAT4 => { let data = get_data::<[f32; 16]>(data_bytes, offset)[0]; - gl.uniform_matrix_4_f32_slice(location, false, &data); + unsafe { gl.uniform_matrix_4_f32_slice(location, false, &data) }; } _ => panic!("Unsupported uniform datatype!"), } @@ -1200,27 +1345,26 @@ impl crate::Queue for super::Queue { ) -> Result<(), crate::DeviceError> { let shared = Arc::clone(&self.shared); let gl = &shared.context.lock(); - self.reset_state(gl); + unsafe { self.reset_state(gl) }; for cmd_buf in command_buffers.iter() { #[cfg(not(target_arch = "wasm32"))] if let Some(ref label) = cmd_buf.label { - gl.push_debug_group(glow::DEBUG_SOURCE_APPLICATION, DEBUG_ID, label); + unsafe { gl.push_debug_group(glow::DEBUG_SOURCE_APPLICATION, DEBUG_ID, label) }; } for command in cmd_buf.commands.iter() { - self.process(gl, command, &cmd_buf.data_bytes, &cmd_buf.queries); + unsafe { self.process(gl, command, &cmd_buf.data_bytes, &cmd_buf.queries) }; } #[cfg(not(target_arch = "wasm32"))] if cmd_buf.label.is_some() { - 
gl.pop_debug_group(); + unsafe { gl.pop_debug_group() }; } } if let Some((fence, value)) = signal_fence { fence.maintain(gl); - let sync = gl - .fence_sync(glow::SYNC_GPU_COMMANDS_COMPLETE, 0) + let sync = unsafe { gl.fence_sync(glow::SYNC_GPU_COMMANDS_COMPLETE, 0) } .map_err(|_| crate::DeviceError::OutOfMemory)?; fence.pending.push((value, sync)); } @@ -1234,12 +1378,12 @@ impl crate::Queue for super::Queue { texture: super::Texture, ) -> Result<(), crate::SurfaceError> { #[cfg(any(not(target_arch = "wasm32"), feature = "emscripten"))] - let gl = &self.shared.context.get_without_egl_lock(); + let gl = unsafe { &self.shared.context.get_without_egl_lock() }; #[cfg(all(target_arch = "wasm32", not(feature = "emscripten")))] let gl = &self.shared.context.glow_context; - surface.present(texture, gl) + unsafe { surface.present(texture, gl) } } unsafe fn get_timestamp_period(&self) -> f32 { diff --git a/wgpu-hal/src/gles/web.rs b/wgpu-hal/src/gles/web.rs index e2f9df0e26..b9e7302182 100644 --- a/wgpu-hal/src/gles/web.rs +++ b/wgpu-hal/src/gles/web.rs @@ -101,7 +101,7 @@ impl crate::Instance for Instance { None => return Vec::new(), }; - super::Adapter::expose(AdapterContext { glow_context: gl }) + unsafe { super::Adapter::expose(AdapterContext { glow_context: gl }) } .into_iter() .collect() } @@ -172,67 +172,67 @@ impl Surface { if swapchain.format.describe().srgb { // Important to set the viewport since we don't know in what state the user left it. 
- gl.viewport( - 0, - 0, - swapchain.extent.width as _, - swapchain.extent.height as _, - ); - gl.bind_framebuffer(glow::DRAW_FRAMEBUFFER, None); - gl.bind_sampler(0, None); - gl.active_texture(glow::TEXTURE0); - gl.bind_texture(glow::TEXTURE_2D, self.texture); - gl.use_program(self.srgb_present_program); - gl.disable(glow::DEPTH_TEST); - gl.disable(glow::STENCIL_TEST); - gl.disable(glow::SCISSOR_TEST); - gl.disable(glow::BLEND); - gl.disable(glow::CULL_FACE); - gl.draw_buffers(&[glow::BACK]); - gl.draw_arrays(glow::TRIANGLES, 0, 3); + unsafe { + gl.viewport( + 0, + 0, + swapchain.extent.width as _, + swapchain.extent.height as _, + ) + }; + unsafe { gl.bind_framebuffer(glow::DRAW_FRAMEBUFFER, None) }; + unsafe { gl.bind_sampler(0, None) }; + unsafe { gl.active_texture(glow::TEXTURE0) }; + unsafe { gl.bind_texture(glow::TEXTURE_2D, self.texture) }; + unsafe { gl.use_program(self.srgb_present_program) }; + unsafe { gl.disable(glow::DEPTH_TEST) }; + unsafe { gl.disable(glow::STENCIL_TEST) }; + unsafe { gl.disable(glow::SCISSOR_TEST) }; + unsafe { gl.disable(glow::BLEND) }; + unsafe { gl.disable(glow::CULL_FACE) }; + unsafe { gl.draw_buffers(&[glow::BACK]) }; + unsafe { gl.draw_arrays(glow::TRIANGLES, 0, 3) }; } else { - gl.bind_framebuffer(glow::READ_FRAMEBUFFER, Some(swapchain.framebuffer)); - gl.bind_framebuffer(glow::DRAW_FRAMEBUFFER, None); + unsafe { gl.bind_framebuffer(glow::READ_FRAMEBUFFER, Some(swapchain.framebuffer)) }; + unsafe { gl.bind_framebuffer(glow::DRAW_FRAMEBUFFER, None) }; // Note the Y-flipping here. GL's presentation is not flipped, // but main rendering is. Therefore, we Y-flip the output positions // in the shader, and also this blit. 
- gl.blit_framebuffer( - 0, - swapchain.extent.height as i32, - swapchain.extent.width as i32, - 0, - 0, - 0, - swapchain.extent.width as i32, - swapchain.extent.height as i32, - glow::COLOR_BUFFER_BIT, - glow::NEAREST, - ); + unsafe { + gl.blit_framebuffer( + 0, + swapchain.extent.height as i32, + swapchain.extent.width as i32, + 0, + 0, + 0, + swapchain.extent.width as i32, + swapchain.extent.height as i32, + glow::COLOR_BUFFER_BIT, + glow::NEAREST, + ) + }; } Ok(()) } unsafe fn create_srgb_present_program(gl: &glow::Context) -> glow::Program { - let program = gl - .create_program() - .expect("Could not create shader program"); - let vertex = gl - .create_shader(glow::VERTEX_SHADER) - .expect("Could not create shader"); - gl.shader_source(vertex, include_str!("./shaders/srgb_present.vert")); - gl.compile_shader(vertex); - let fragment = gl - .create_shader(glow::FRAGMENT_SHADER) - .expect("Could not create shader"); - gl.shader_source(fragment, include_str!("./shaders/srgb_present.frag")); - gl.compile_shader(fragment); - gl.attach_shader(program, vertex); - gl.attach_shader(program, fragment); - gl.link_program(program); - gl.delete_shader(vertex); - gl.delete_shader(fragment); - gl.bind_texture(glow::TEXTURE_2D, None); + let program = unsafe { gl.create_program() }.expect("Could not create shader program"); + let vertex = + unsafe { gl.create_shader(glow::VERTEX_SHADER) }.expect("Could not create shader"); + unsafe { gl.shader_source(vertex, include_str!("./shaders/srgb_present.vert")) }; + unsafe { gl.compile_shader(vertex) }; + let fragment = + unsafe { gl.create_shader(glow::FRAGMENT_SHADER) }.expect("Could not create shader"); + unsafe { gl.shader_source(fragment, include_str!("./shaders/srgb_present.frag")) }; + unsafe { gl.compile_shader(fragment) }; + unsafe { gl.attach_shader(program, vertex) }; + unsafe { gl.attach_shader(program, fragment) }; + unsafe { gl.link_program(program) }; + unsafe { gl.delete_shader(vertex) }; + unsafe { 
gl.delete_shader(fragment) }; + unsafe { gl.bind_texture(glow::TEXTURE_2D, None) }; program } @@ -253,49 +253,57 @@ impl crate::Surface for Surface { if let Some(swapchain) = self.swapchain.take() { // delete all frame buffers already allocated - gl.delete_framebuffer(swapchain.framebuffer); + unsafe { gl.delete_framebuffer(swapchain.framebuffer) }; } if self.srgb_present_program.is_none() && config.format.describe().srgb { - self.srgb_present_program = Some(Self::create_srgb_present_program(gl)); + self.srgb_present_program = Some(unsafe { Self::create_srgb_present_program(gl) }); } if let Some(texture) = self.texture.take() { - gl.delete_texture(texture); + unsafe { gl.delete_texture(texture) }; } - self.texture = Some(gl.create_texture().unwrap()); + self.texture = Some(unsafe { gl.create_texture() }.unwrap()); let desc = device.shared.describe_texture_format(config.format); - gl.bind_texture(glow::TEXTURE_2D, self.texture); - gl.tex_parameter_i32( - glow::TEXTURE_2D, - glow::TEXTURE_MIN_FILTER, - glow::NEAREST as _, - ); - gl.tex_parameter_i32( - glow::TEXTURE_2D, - glow::TEXTURE_MAG_FILTER, - glow::NEAREST as _, - ); - gl.tex_storage_2d( - glow::TEXTURE_2D, - 1, - desc.internal, - config.extent.width as i32, - config.extent.height as i32, - ); - - let framebuffer = gl.create_framebuffer().unwrap(); - gl.bind_framebuffer(glow::READ_FRAMEBUFFER, Some(framebuffer)); - gl.framebuffer_texture_2d( - glow::READ_FRAMEBUFFER, - glow::COLOR_ATTACHMENT0, - glow::TEXTURE_2D, - self.texture, - 0, - ); - gl.bind_texture(glow::TEXTURE_2D, None); + unsafe { gl.bind_texture(glow::TEXTURE_2D, self.texture) }; + unsafe { + gl.tex_parameter_i32( + glow::TEXTURE_2D, + glow::TEXTURE_MIN_FILTER, + glow::NEAREST as _, + ) + }; + unsafe { + gl.tex_parameter_i32( + glow::TEXTURE_2D, + glow::TEXTURE_MAG_FILTER, + glow::NEAREST as _, + ) + }; + unsafe { + gl.tex_storage_2d( + glow::TEXTURE_2D, + 1, + desc.internal, + config.extent.width as i32, + config.extent.height as i32, + ) + }; + + 
let framebuffer = unsafe { gl.create_framebuffer() }.unwrap(); + unsafe { gl.bind_framebuffer(glow::READ_FRAMEBUFFER, Some(framebuffer)) }; + unsafe { + gl.framebuffer_texture_2d( + glow::READ_FRAMEBUFFER, + glow::COLOR_ATTACHMENT0, + glow::TEXTURE_2D, + self.texture, + 0, + ) + }; + unsafe { gl.bind_texture(glow::TEXTURE_2D, None) }; self.swapchain = Some(Swapchain { extent: config.extent, @@ -310,10 +318,10 @@ impl crate::Surface for Surface { unsafe fn unconfigure(&mut self, device: &super::Device) { let gl = device.shared.context.lock(); if let Some(swapchain) = self.swapchain.take() { - gl.delete_framebuffer(swapchain.framebuffer); + unsafe { gl.delete_framebuffer(swapchain.framebuffer) }; } if let Some(renderbuffer) = self.texture.take() { - gl.delete_texture(renderbuffer); + unsafe { gl.delete_texture(renderbuffer) }; } } diff --git a/wgpu-hal/src/lib.rs b/wgpu-hal/src/lib.rs index 02fa246431..b688e326d6 100644 --- a/wgpu-hal/src/lib.rs +++ b/wgpu-hal/src/lib.rs @@ -14,6 +14,7 @@ * - secondary backends (DX11/GLES): 0.5 each */ +#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] #![allow( // for `if_then_panic` until it reaches stable unknown_lints, @@ -41,6 +42,7 @@ #![warn( trivial_casts, trivial_numeric_casts, + unsafe_op_in_unsafe_fn, unused_extern_crates, unused_qualifications, // We don't match on a reference, unless required. @@ -581,15 +583,20 @@ bitflags!( /// Format can be used as depth-stencil and input attachment. const DEPTH_STENCIL_ATTACHMENT = 1 << 8; - /// Format can be multisampled. - const MULTISAMPLE = 1 << 9; + /// Format can be multisampled by x2. + const MULTISAMPLE_X2 = 1 << 9; + /// Format can be multisampled by x4. + const MULTISAMPLE_X4 = 1 << 10; + /// Format can be multisampled by x8. + const MULTISAMPLE_X8 = 1 << 11; + /// Format can be used for render pass resolve targets. - const MULTISAMPLE_RESOLVE = 1 << 10; + const MULTISAMPLE_RESOLVE = 1 << 12; /// Format can be copied from. 
- const COPY_SRC = 1 << 11; + const COPY_SRC = 1 << 13; /// Format can be copied to. - const COPY_DST = 1 << 12; + const COPY_DST = 1 << 14; } ); @@ -878,8 +885,27 @@ pub struct PipelineLayoutDescriptor<'a, A: Api> { #[derive(Debug)] pub struct BufferBinding<'a, A: Api> { + /// The buffer being bound. pub buffer: &'a A::Buffer, + + /// The offset at which the bound region starts. + /// + /// This must be less than the size of the buffer. Some back ends + /// cannot tolerate zero-length regions; for example, see + /// [VUID-VkDescriptorBufferInfo-offset-00340][340] and + /// [VUID-VkDescriptorBufferInfo-range-00341][341], or the + /// documentation for GLES's [glBindBufferRange][bbr]. + /// + /// [340]: https://registry.khronos.org/vulkan/specs/1.3-extensions/html/vkspec.html#VUID-VkDescriptorBufferInfo-offset-00340 + /// [341]: https://registry.khronos.org/vulkan/specs/1.3-extensions/html/vkspec.html#VUID-VkDescriptorBufferInfo-range-00341 + /// [bbr]: https://registry.khronos.org/OpenGL-Refpages/es3.0/html/glBindBufferRange.xhtml pub offset: wgt::BufferAddress, + + /// The size of the region bound, in bytes. + /// + /// If `None`, the region extends from `offset` to the end of the + /// buffer. Given the restrictions on `offset`, this means that + /// the size is always greater than zero. 
pub size: Option, } diff --git a/wgpu-hal/src/metal/adapter.rs b/wgpu-hal/src/metal/adapter.rs index a8bf9b18c3..201b6960b9 100644 --- a/wgpu-hal/src/metal/adapter.rs +++ b/wgpu-hal/src/metal/adapter.rs @@ -56,16 +56,8 @@ impl crate::Adapter for super::Adapter { (Tfc::STORAGE_READ_WRITE, Tfc::STORAGE_READ_WRITE) } }; - let msaa_desktop_if = if pc.msaa_desktop { - Tfc::MULTISAMPLE - } else { - Tfc::empty() - }; - let msaa_apple7x_if = if pc.msaa_desktop | pc.msaa_apple7 { - Tfc::MULTISAMPLE - } else { - Tfc::empty() - }; + let msaa_count = pc.sample_count_mask; + let msaa_resolve_desktop_if = if pc.msaa_desktop { Tfc::MULTISAMPLE_RESOLVE } else { @@ -90,7 +82,7 @@ impl crate::Adapter for super::Adapter { | Tfc::STORAGE | Tfc::COLOR_ATTACHMENT | Tfc::COLOR_ATTACHMENT_BLEND - | Tfc::MULTISAMPLE + | msaa_count | Tfc::MULTISAMPLE_RESOLVE; let extra = match format { @@ -110,7 +102,7 @@ impl crate::Adapter for super::Adapter { | Tf::Rgba8Sint | Tf::Rgba16Uint | Tf::Rgba16Sint => { - read_write_tier2_if | Tfc::STORAGE | Tfc::COLOR_ATTACHMENT | Tfc::MULTISAMPLE + read_write_tier2_if | Tfc::STORAGE | Tfc::COLOR_ATTACHMENT | msaa_count } Tf::R16Unorm | Tf::R16Snorm @@ -122,26 +114,23 @@ impl crate::Adapter for super::Adapter { | Tfc::STORAGE | Tfc::COLOR_ATTACHMENT | Tfc::COLOR_ATTACHMENT_BLEND - | Tfc::MULTISAMPLE + | msaa_count | msaa_resolve_desktop_if } Tf::Rg8Unorm | Tf::Rg16Float | Tf::Bgra8Unorm => all_caps, - Tf::Rg8Uint | Tf::Rg8Sint => Tfc::STORAGE | Tfc::COLOR_ATTACHMENT | Tfc::MULTISAMPLE, + Tf::Rg8Uint | Tf::Rg8Sint => Tfc::STORAGE | Tfc::COLOR_ATTACHMENT | msaa_count, Tf::R32Uint | Tf::R32Sint => { - read_write_tier1_if | Tfc::STORAGE | Tfc::COLOR_ATTACHMENT | msaa_desktop_if + read_write_tier1_if | Tfc::STORAGE | Tfc::COLOR_ATTACHMENT | msaa_count } Tf::R32Float => { let flags = if pc.format_r32float_all { all_caps } else { - Tfc::STORAGE - | Tfc::COLOR_ATTACHMENT - | Tfc::COLOR_ATTACHMENT_BLEND - | Tfc::MULTISAMPLE + Tfc::STORAGE | Tfc::COLOR_ATTACHMENT | 
Tfc::COLOR_ATTACHMENT_BLEND | msaa_count }; read_write_tier1_if | flags } - Tf::Rg16Uint | Tf::Rg16Sint => Tfc::STORAGE | Tfc::COLOR_ATTACHMENT | Tfc::MULTISAMPLE, + Tf::Rg16Uint | Tf::Rg16Sint => Tfc::STORAGE | Tfc::COLOR_ATTACHMENT | msaa_count, Tf::Rgba8UnormSrgb | Tf::Bgra8UnormSrgb => { let mut flags = all_caps; flags.set(Tfc::STORAGE, pc.format_rgba8_srgb_all); @@ -157,26 +146,23 @@ impl crate::Adapter for super::Adapter { flags.set(Tfc::STORAGE, pc.format_rg11b10_all); flags } - Tf::Rg32Uint | Tf::Rg32Sint => Tfc::COLOR_ATTACHMENT | Tfc::STORAGE | msaa_apple7x_if, + Tf::Rg32Uint | Tf::Rg32Sint => Tfc::COLOR_ATTACHMENT | Tfc::STORAGE | msaa_count, Tf::Rg32Float => { if pc.format_rg32float_all { all_caps } else { - Tfc::STORAGE - | Tfc::COLOR_ATTACHMENT - | Tfc::COLOR_ATTACHMENT_BLEND - | msaa_apple7x_if + Tfc::STORAGE | Tfc::COLOR_ATTACHMENT | Tfc::COLOR_ATTACHMENT_BLEND | msaa_count } } Tf::Rgba32Uint | Tf::Rgba32Sint => { - read_write_tier2_if | Tfc::STORAGE | Tfc::COLOR_ATTACHMENT | msaa_desktop_if + read_write_tier2_if | Tfc::STORAGE | Tfc::COLOR_ATTACHMENT | msaa_count } Tf::Rgba32Float => { let mut flags = read_write_tier2_if | Tfc::STORAGE | Tfc::COLOR_ATTACHMENT; if pc.format_rgba32float_all { flags |= all_caps } else if pc.msaa_apple7 { - flags |= Tfc::MULTISAMPLE + flags |= msaa_count }; flags } @@ -189,7 +175,7 @@ impl crate::Adapter for super::Adapter { }*/ Tf::Depth16Unorm => { let mut flags = - Tfc::DEPTH_STENCIL_ATTACHMENT | Tfc::MULTISAMPLE | msaa_resolve_apple3x_if; + Tfc::DEPTH_STENCIL_ATTACHMENT | msaa_count | msaa_resolve_apple3x_if; if pc.format_depth16unorm { flags |= Tfc::SAMPLED_LINEAR } @@ -197,14 +183,14 @@ impl crate::Adapter for super::Adapter { } Tf::Depth32Float | Tf::Depth32FloatStencil8 => { let mut flags = - Tfc::DEPTH_STENCIL_ATTACHMENT | Tfc::MULTISAMPLE | msaa_resolve_apple3x_if; + Tfc::DEPTH_STENCIL_ATTACHMENT | msaa_count | msaa_resolve_apple3x_if; if pc.format_depth32float_filter { flags |= Tfc::SAMPLED_LINEAR } flags } 
Tf::Depth24Plus | Tf::Depth24PlusStencil8 => { - let mut flags = Tfc::DEPTH_STENCIL_ATTACHMENT | Tfc::MULTISAMPLE; + let mut flags = Tfc::DEPTH_STENCIL_ATTACHMENT | msaa_count; if pc.format_depth24_stencil8 { flags |= Tfc::SAMPLED_LINEAR | Tfc::MULTISAMPLE_RESOLVE } else { @@ -224,7 +210,7 @@ impl crate::Adapter for super::Adapter { Tfc::SAMPLED_LINEAR | Tfc::COLOR_ATTACHMENT | Tfc::COLOR_ATTACHMENT_BLEND - | Tfc::MULTISAMPLE + | msaa_count | Tfc::MULTISAMPLE_RESOLVE } } @@ -489,12 +475,12 @@ impl super::PrivateCapabilities { version.is_mac = os_is_mac; let family_check = version.at_least((10, 15), (13, 0)); - let mut sample_count_mask: u8 = 1 | 4; // 1 and 4 samples are supported on all devices + let mut sample_count_mask = crate::TextureFormatCapabilities::MULTISAMPLE_X4; // 1 and 4 samples are supported on all devices if device.supports_texture_sample_count(2) { - sample_count_mask |= 2; + sample_count_mask |= crate::TextureFormatCapabilities::MULTISAMPLE_X2; } if device.supports_texture_sample_count(8) { - sample_count_mask |= 8; + sample_count_mask |= crate::TextureFormatCapabilities::MULTISAMPLE_X8; } let rw_texture_tier = if version.at_least((10, 13), (11, 0)) { @@ -778,7 +764,6 @@ impl super::PrivateCapabilities { features.set(F::TEXTURE_COMPRESSION_ETC2, self.format_eac_etc); features.set(F::DEPTH_CLIP_CONTROL, self.supports_depth_clip_control); - features.set(F::DEPTH24PLUS_STENCIL8, self.format_depth24_stencil8); features.set( F::TEXTURE_BINDING_ARRAY diff --git a/wgpu-hal/src/metal/device.rs b/wgpu-hal/src/metal/device.rs index 9ff02a4413..a2d151c02c 100644 --- a/wgpu-hal/src/metal/device.rs +++ b/wgpu-hal/src/metal/device.rs @@ -16,7 +16,17 @@ struct CompiledShader { function: mtl::Function, wg_size: mtl::MTLSize, wg_memory_sizes: Vec, + + /// Bindings of WGSL `storage` globals that contain variable-sized arrays. 
+ /// + /// In order to implement bounds checks and the `arrayLength` function for + /// WGSL runtime-sized arrays, we pass the entry point a struct with a + /// member for each global variable that contains such an array. That member + /// is a `u32` holding the variable's total size in bytes---which is simply + /// the size of the `Buffer` supplying that variable's contents for the + /// draw call. sized_bindings: Vec, + immutable_buffer_mask: usize, } @@ -256,7 +266,7 @@ impl crate::Device for super::Device { let ptr = buffer.raw.contents() as *mut u8; assert!(!ptr.is_null()); Ok(crate::BufferMapping { - ptr: ptr::NonNull::new(ptr.offset(range.start as isize)).unwrap(), + ptr: ptr::NonNull::new(unsafe { ptr.offset(range.start as isize) }).unwrap(), is_coherent: true, }) } @@ -724,6 +734,8 @@ impl crate::Device for super::Device { let end = start + size as usize; bg.buffers .extend(desc.buffers[start..end].iter().map(|source| { + // Given the restrictions on `BufferBinding::offset`, + // this should never be `None`. 
let remaining_size = wgt::BufferSize::new(source.buffer.size - source.offset); let binding_size = match ty { diff --git a/wgpu-hal/src/metal/mod.rs b/wgpu-hal/src/metal/mod.rs index 62a9b3b087..b36dbe473a 100644 --- a/wgpu-hal/src/metal/mod.rs +++ b/wgpu-hal/src/metal/mod.rs @@ -88,19 +88,18 @@ impl crate::Instance for Instance { #[cfg(target_os = "ios")] raw_window_handle::RawWindowHandle::UiKit(handle) => { let _ = &self.managed_metal_layer_delegate; - Ok(Surface::from_view(handle.ui_view, None)) + Ok(unsafe { Surface::from_view(handle.ui_view, None) }) } #[cfg(target_os = "macos")] - raw_window_handle::RawWindowHandle::AppKit(handle) => Ok(Surface::from_view( - handle.ns_view, - Some(&self.managed_metal_layer_delegate), - )), + raw_window_handle::RawWindowHandle::AppKit(handle) => Ok(unsafe { + Surface::from_view(handle.ns_view, Some(&self.managed_metal_layer_delegate)) + }), _ => Err(crate::InstanceError), } } unsafe fn destroy_surface(&self, surface: Surface) { - surface.dispose(); + unsafe { surface.dispose() }; } unsafe fn enumerate_adapters(&self) -> Vec> { @@ -219,7 +218,7 @@ struct PrivateCapabilities { max_varying_components: u32, max_threads_per_group: u32, max_total_threadgroup_memory: u32, - sample_count_mask: u8, + sample_count_mask: crate::TextureFormatCapabilities, supports_debug_markers: bool, supports_binary_archives: bool, supports_capture_manager: bool, @@ -584,7 +583,17 @@ struct BufferResource { ptr: BufferPtr, offset: wgt::BufferAddress, dynamic_index: Option, + + /// The buffer's size, if it is a [`Storage`] binding. Otherwise `None`. + /// + /// Buffers with the [`wgt::BufferBindingType::Storage`] binding type can + /// hold WGSL runtime-sized arrays. When one does, we must pass its size to + /// shader entry points to implement bounds checks and WGSL's `arrayLength` + /// function. See [`device::CompiledShader::sized_bindings`] for details. 
+ /// + /// [`Storage`]: wgt::BufferBindingType::Storage binding_size: Option, + binding_location: u32, } @@ -607,7 +616,15 @@ pub struct ShaderModule { #[derive(Debug, Default)] struct PipelineStageInfo { push_constants: Option, + + /// The buffer argument table index at which we pass runtime-sized arrays' buffer sizes. + /// + /// See [`device::CompiledShader::sized_bindings`] for more details. sizes_slot: Option, + + /// Bindings of all WGSL `storage` globals that contain runtime-sized arrays. + /// + /// See [`device::CompiledShader::sized_bindings`] for more details. sized_bindings: Vec, } @@ -714,7 +731,28 @@ struct CommandState { index: Option, raw_wg_size: mtl::MTLSize, stage_infos: MultiStageData, + + /// Sizes of currently bound [`wgt::BufferBindingType::Storage`] buffers. + /// + /// Specifically: + /// + /// - The keys are ['ResourceBinding`] values (that is, the WGSL `@group` + /// and `@binding` attributes) for `var` global variables in the + /// current module that contain runtime-sized arrays. + /// + /// - The values are the actual sizes of the buffers currently bound to + /// provide those globals' contents, which are needed to implement bounds + /// checks and the WGSL `arrayLength` function. + /// + /// For each stage `S` in `stage_infos`, we consult this to find the sizes + /// of the buffers listed in [`stage_infos.S.sized_bindings`], which we must + /// pass to the entry point. + /// + /// See [`device::CompiledShader::sized_bindings`] for more details. 
+ /// + /// [`ResourceBinding`]: naga::ResourceBinding storage_buffer_length_map: fxhash::FxHashMap, + work_group_memory_sizes: Vec, push_constants: Vec, } diff --git a/wgpu-hal/src/metal/surface.rs b/wgpu-hal/src/metal/surface.rs index 1e0f1070b8..fffad30f03 100644 --- a/wgpu-hal/src/metal/surface.rs +++ b/wgpu-hal/src/metal/surface.rs @@ -83,9 +83,11 @@ impl super::Surface { delegate: Option<&HalManagedMetalLayerDelegate>, ) -> Self { let view = view as *mut Object; - let render_layer = - mem::transmute::<_, &mtl::MetalLayerRef>(Self::get_metal_layer(view, delegate)) - .to_owned(); + let render_layer = { + let layer = unsafe { Self::get_metal_layer(view, delegate) }; + unsafe { mem::transmute::<_, &mtl::MetalLayerRef>(layer) } + } + .to_owned(); let _: *mut c_void = msg_send![view, retain]; Self::new(NonNull::new(view), render_layer) } @@ -98,7 +100,7 @@ impl super::Surface { } /// If not called on the main thread, this will panic. - pub unsafe fn get_metal_layer( + pub(crate) unsafe fn get_metal_layer( view: *mut Object, delegate: Option<&HalManagedMetalLayerDelegate>, ) -> *mut Object { @@ -136,7 +138,7 @@ impl super::Surface { { let () = msg_send![view, setLayer: new_layer]; let () = msg_send![view, setWantsLayer: YES]; - let () = msg_send![new_layer, setContentsGravity: kCAGravityTopLeft]; + let () = msg_send![new_layer, setContentsGravity: unsafe { kCAGravityTopLeft }]; let window: *mut Object = msg_send![view, window]; if !window.is_null() { let scale_factor: CGFloat = msg_send![window, backingScaleFactor]; @@ -207,7 +209,7 @@ impl crate::Surface for super::Surface { let () = msg_send![*render_layer, setFrame: bounds]; } } - render_layer.set_device(&*device_raw); + render_layer.set_device(&device_raw); render_layer.set_pixel_format(self.raw_swapchain_format); render_layer.set_framebuffer_only(framebuffer_only); render_layer.set_presents_with_transaction(self.present_with_transaction); diff --git a/wgpu-hal/src/vulkan/adapter.rs 
b/wgpu-hal/src/vulkan/adapter.rs index 0c035f6ae7..72ae9ce90e 100644 --- a/wgpu-hal/src/vulkan/adapter.rs +++ b/wgpu-hal/src/vulkan/adapter.rs @@ -481,17 +481,6 @@ impl PhysicalDeviceFeatures { ), ); - features.set( - F::DEPTH24PLUS_STENCIL8, - supports_format( - instance, - phd, - vk::Format::D24_UNORM_S8_UINT, - vk::ImageTiling::OPTIMAL, - vk::FormatFeatureFlags::DEPTH_STENCIL_ATTACHMENT, - ), - ); - (features, dl_flags) } @@ -1121,9 +1110,11 @@ impl super::Adapter { ) -> Result, crate::DeviceError> { let mem_properties = { profiling::scope!("vkGetPhysicalDeviceMemoryProperties"); - self.instance - .raw - .get_physical_device_memory_properties(self.raw) + unsafe { + self.instance + .raw + .get_physical_device_memory_properties(self.raw) + } }; let memory_types = &mem_properties.memory_types[..mem_properties.memory_type_count as usize]; @@ -1230,7 +1221,7 @@ impl super::Adapter { let raw_queue = { profiling::scope!("vkGetDeviceQueue"); - raw_device.get_device_queue(family_index, queue_index) + unsafe { raw_device.get_device_queue(family_index, queue_index) } }; let shared = Arc::new(super::DeviceShared { @@ -1257,9 +1248,11 @@ impl super::Adapter { }); let mut relay_semaphores = [vk::Semaphore::null(); 2]; for sem in relay_semaphores.iter_mut() { - *sem = shared - .raw - .create_semaphore(&vk::SemaphoreCreateInfo::builder(), None)?; + unsafe { + *sem = shared + .raw + .create_semaphore(&vk::SemaphoreCreateInfo::builder(), None)? + }; } let queue = super::Queue { raw: raw_queue, @@ -1355,18 +1348,20 @@ impl crate::Adapter for super::Adapter { .build(); let raw_device = { profiling::scope!("vkCreateDevice"); - self.instance.raw.create_device(self.raw, &info, None)? + unsafe { self.instance.raw.create_device(self.raw, &info, None)? 
} }; - self.device_from_raw( - raw_device, - true, - &enabled_extensions, - features, - uab_types, - family_info.queue_family_index, - 0, - ) + unsafe { + self.device_from_raw( + raw_device, + true, + &enabled_extensions, + features, + uab_types, + family_info.queue_family_index, + 0, + ) + } } unsafe fn texture_format_capabilities( @@ -1376,10 +1371,11 @@ impl crate::Adapter for super::Adapter { use crate::TextureFormatCapabilities as Tfc; let vk_format = self.private_caps.map_texture_format(format); - let properties = self - .instance - .raw - .get_physical_device_format_properties(self.raw, vk_format); + let properties = unsafe { + self.instance + .raw + .get_physical_device_format_properties(self.raw, vk_format) + }; let features = properties.optimal_tiling_features; let mut flags = Tfc::empty(); @@ -1428,10 +1424,42 @@ impl crate::Adapter for super::Adapter { ), ); // Vulkan is very permissive about MSAA + flags.set(Tfc::MULTISAMPLE_RESOLVE, !format.describe().is_compressed()); + + // get the supported sample counts + let format_aspect = crate::FormatAspects::from(format); + let limits = self.phd_capabilities.properties.limits; + + let sample_flags = if format_aspect.contains(crate::FormatAspects::DEPTH) { + limits + .framebuffer_depth_sample_counts + .min(limits.sampled_image_depth_sample_counts) + } else if format_aspect.contains(crate::FormatAspects::STENCIL) { + limits + .framebuffer_stencil_sample_counts + .min(limits.sampled_image_stencil_sample_counts) + } else { + limits + .framebuffer_color_sample_counts + .min(limits.sampled_image_color_sample_counts) + .min(limits.sampled_image_integer_sample_counts) + .min(limits.storage_image_sample_counts) + }; + + flags.set( + Tfc::MULTISAMPLE_X2, + sample_flags.contains(vk::SampleCountFlags::TYPE_2), + ); flags.set( - Tfc::MULTISAMPLE | Tfc::MULTISAMPLE_RESOLVE, - !format.describe().is_compressed(), + Tfc::MULTISAMPLE_X4, + sample_flags.contains(vk::SampleCountFlags::TYPE_4), ); + + flags.set( + 
Tfc::MULTISAMPLE_X8, + sample_flags.contains(vk::SampleCountFlags::TYPE_8), + ); + flags } @@ -1445,11 +1473,13 @@ impl crate::Adapter for super::Adapter { let queue_family_index = 0; //TODO { profiling::scope!("vkGetPhysicalDeviceSurfaceSupportKHR"); - match surface.functor.get_physical_device_surface_support( - self.raw, - queue_family_index, - surface.raw, - ) { + match unsafe { + surface.functor.get_physical_device_surface_support( + self.raw, + queue_family_index, + surface.raw, + ) + } { Ok(true) => (), Ok(false) => return None, Err(e) => { @@ -1461,10 +1491,11 @@ impl crate::Adapter for super::Adapter { let caps = { profiling::scope!("vkGetPhysicalDeviceSurfaceCapabilitiesKHR"); - match surface - .functor - .get_physical_device_surface_capabilities(self.raw, surface.raw) - { + match unsafe { + surface + .functor + .get_physical_device_surface_capabilities(self.raw, surface.raw) + } { Ok(caps) => caps, Err(e) => { log::error!("get_physical_device_surface_capabilities: {}", e); @@ -1506,10 +1537,11 @@ impl crate::Adapter for super::Adapter { let raw_present_modes = { profiling::scope!("vkGetPhysicalDeviceSurfacePresentModesKHR"); - match surface - .functor - .get_physical_device_surface_present_modes(self.raw, surface.raw) - { + match unsafe { + surface + .functor + .get_physical_device_surface_present_modes(self.raw, surface.raw) + } { Ok(present_modes) => present_modes, Err(e) => { log::error!("get_physical_device_surface_present_modes: {}", e); @@ -1520,10 +1552,11 @@ impl crate::Adapter for super::Adapter { let raw_surface_formats = { profiling::scope!("vkGetPhysicalDeviceSurfaceFormatsKHR"); - match surface - .functor - .get_physical_device_surface_formats(self.raw, surface.raw) - { + match unsafe { + surface + .functor + .get_physical_device_surface_formats(self.raw, surface.raw) + } { Ok(formats) => formats, Err(e) => { log::error!("get_physical_device_surface_formats: {}", e); diff --git a/wgpu-hal/src/vulkan/command.rs b/wgpu-hal/src/vulkan/command.rs 
index 96a6384836..d266cd1f47 100644 --- a/wgpu-hal/src/vulkan/command.rs +++ b/wgpu-hal/src/vulkan/command.rs @@ -50,18 +50,20 @@ impl crate::CommandEncoder for super::CommandEncoder { .command_pool(self.raw) .command_buffer_count(ALLOCATION_GRANULARITY) .build(); - let cmd_buf_vec = self.device.raw.allocate_command_buffers(&vk_info)?; + let cmd_buf_vec = unsafe { self.device.raw.allocate_command_buffers(&vk_info)? }; self.free.extend(cmd_buf_vec); } let raw = self.free.pop().unwrap(); // Set the name unconditionally, since there might be a // previous name assigned to this. - self.device.set_object_name( - vk::ObjectType::COMMAND_BUFFER, - raw, - label.unwrap_or_default(), - ); + unsafe { + self.device.set_object_name( + vk::ObjectType::COMMAND_BUFFER, + raw, + label.unwrap_or_default(), + ) + }; // Reset this in case the last renderpass was never ended. self.rpass_debug_marker_active = false; @@ -69,7 +71,7 @@ impl crate::CommandEncoder for super::CommandEncoder { let vk_info = vk::CommandBufferBeginInfo::builder() .flags(vk::CommandBufferUsageFlags::ONE_TIME_SUBMIT) .build(); - self.device.raw.begin_command_buffer(raw, &vk_info)?; + unsafe { self.device.raw.begin_command_buffer(raw, &vk_info) }?; self.active = raw; Ok(()) @@ -78,7 +80,7 @@ impl crate::CommandEncoder for super::CommandEncoder { unsafe fn end_encoding(&mut self) -> Result { let raw = self.active; self.active = vk::CommandBuffer::null(); - self.device.raw.end_command_buffer(raw)?; + unsafe { self.device.raw.end_command_buffer(raw) }?; Ok(super::CommandBuffer { raw }) } @@ -95,10 +97,11 @@ impl crate::CommandEncoder for super::CommandEncoder { self.free .extend(cmd_bufs.into_iter().map(|cmd_buf| cmd_buf.raw)); self.free.append(&mut self.discarded); - let _ = self - .device - .raw - .reset_command_pool(self.raw, vk::CommandPoolResetFlags::default()); + let _ = unsafe { + self.device + .raw + .reset_command_pool(self.raw, vk::CommandPoolResetFlags::default()) + }; } unsafe fn transition_buffers<'a, 
T>(&mut self, barriers: T) @@ -128,15 +131,17 @@ impl crate::CommandEncoder for super::CommandEncoder { } if !vk_barriers.is_empty() { - self.device.raw.cmd_pipeline_barrier( - self.active, - src_stages, - dst_stages, - vk::DependencyFlags::empty(), - &[], - vk_barriers, - &[], - ); + unsafe { + self.device.raw.cmd_pipeline_barrier( + self.active, + src_stages, + dst_stages, + vk::DependencyFlags::empty(), + &[], + vk_barriers, + &[], + ) + }; } } @@ -171,26 +176,30 @@ impl crate::CommandEncoder for super::CommandEncoder { } if !vk_barriers.is_empty() { - self.device.raw.cmd_pipeline_barrier( - self.active, - src_stages, - dst_stages, - vk::DependencyFlags::empty(), - &[], - &[], - vk_barriers, - ); + unsafe { + self.device.raw.cmd_pipeline_barrier( + self.active, + src_stages, + dst_stages, + vk::DependencyFlags::empty(), + &[], + &[], + vk_barriers, + ) + }; } } unsafe fn clear_buffer(&mut self, buffer: &super::Buffer, range: crate::MemoryRange) { - self.device.raw.cmd_fill_buffer( - self.active, - buffer.raw, - range.start, - range.end - range.start, - 0, - ); + unsafe { + self.device.raw.cmd_fill_buffer( + self.active, + buffer.raw, + range.start, + range.end - range.start, + 0, + ) + }; } unsafe fn copy_buffer_to_buffer( @@ -207,12 +216,14 @@ impl crate::CommandEncoder for super::CommandEncoder { size: r.size.get(), }); - self.device.raw.cmd_copy_buffer( - self.active, - src.raw, - dst.raw, - &smallvec::SmallVec::<[vk::BufferCopy; 32]>::from_iter(vk_regions_iter), - ); + unsafe { + self.device.raw.cmd_copy_buffer( + self.active, + src.raw, + dst.raw, + &smallvec::SmallVec::<[vk::BufferCopy; 32]>::from_iter(vk_regions_iter), + ) + }; } unsafe fn copy_texture_to_texture( @@ -244,14 +255,16 @@ impl crate::CommandEncoder for super::CommandEncoder { } }); - self.device.raw.cmd_copy_image( - self.active, - src.raw, - src_layout, - dst.raw, - DST_IMAGE_LAYOUT, - &smallvec::SmallVec::<[vk::ImageCopy; 32]>::from_iter(vk_regions_iter), - ); + unsafe { + 
self.device.raw.cmd_copy_image( + self.active, + src.raw, + src_layout, + dst.raw, + DST_IMAGE_LAYOUT, + &smallvec::SmallVec::<[vk::ImageCopy; 32]>::from_iter(vk_regions_iter), + ) + }; } unsafe fn copy_buffer_to_texture( @@ -264,13 +277,15 @@ impl crate::CommandEncoder for super::CommandEncoder { { let vk_regions_iter = dst.map_buffer_copies(regions); - self.device.raw.cmd_copy_buffer_to_image( - self.active, - src.raw, - dst.raw, - DST_IMAGE_LAYOUT, - &smallvec::SmallVec::<[vk::BufferImageCopy; 32]>::from_iter(vk_regions_iter), - ); + unsafe { + self.device.raw.cmd_copy_buffer_to_image( + self.active, + src.raw, + dst.raw, + DST_IMAGE_LAYOUT, + &smallvec::SmallVec::<[vk::BufferImageCopy; 32]>::from_iter(vk_regions_iter), + ) + }; } unsafe fn copy_texture_to_buffer( @@ -285,41 +300,49 @@ impl crate::CommandEncoder for super::CommandEncoder { let src_layout = conv::derive_image_layout(src_usage, src.aspects); let vk_regions_iter = src.map_buffer_copies(regions); - self.device.raw.cmd_copy_image_to_buffer( - self.active, - src.raw, - src_layout, - dst.raw, - &smallvec::SmallVec::<[vk::BufferImageCopy; 32]>::from_iter(vk_regions_iter), - ); + unsafe { + self.device.raw.cmd_copy_image_to_buffer( + self.active, + src.raw, + src_layout, + dst.raw, + &smallvec::SmallVec::<[vk::BufferImageCopy; 32]>::from_iter(vk_regions_iter), + ) + }; } unsafe fn begin_query(&mut self, set: &super::QuerySet, index: u32) { - self.device.raw.cmd_begin_query( - self.active, - set.raw, - index, - vk::QueryControlFlags::empty(), - ); + unsafe { + self.device.raw.cmd_begin_query( + self.active, + set.raw, + index, + vk::QueryControlFlags::empty(), + ) + }; } unsafe fn end_query(&mut self, set: &super::QuerySet, index: u32) { - self.device.raw.cmd_end_query(self.active, set.raw, index); + unsafe { self.device.raw.cmd_end_query(self.active, set.raw, index) }; } unsafe fn write_timestamp(&mut self, set: &super::QuerySet, index: u32) { - self.device.raw.cmd_write_timestamp( - self.active, - 
vk::PipelineStageFlags::BOTTOM_OF_PIPE, - set.raw, - index, - ); + unsafe { + self.device.raw.cmd_write_timestamp( + self.active, + vk::PipelineStageFlags::BOTTOM_OF_PIPE, + set.raw, + index, + ) + }; } unsafe fn reset_queries(&mut self, set: &super::QuerySet, range: Range) { - self.device.raw.cmd_reset_query_pool( - self.active, - set.raw, - range.start, - range.end - range.start, - ); + unsafe { + self.device.raw.cmd_reset_query_pool( + self.active, + set.raw, + range.start, + range.end - range.start, + ) + }; } unsafe fn copy_query_results( &mut self, @@ -329,16 +352,18 @@ impl crate::CommandEncoder for super::CommandEncoder { offset: wgt::BufferAddress, stride: wgt::BufferSize, ) { - self.device.raw.cmd_copy_query_pool_results( - self.active, - set.raw, - range.start, - range.end - range.start, - buffer.raw, - offset, - stride.get(), - vk::QueryResultFlags::TYPE_64 | vk::QueryResultFlags::WAIT, - ); + unsafe { + self.device.raw.cmd_copy_query_pool_results( + self.active, + set.raw, + range.start, + range.end - range.start, + buffer.raw, + offset, + stride.get(), + vk::QueryResultFlags::TYPE_64 | vk::QueryResultFlags::WAIT, + ) + }; } // render @@ -358,7 +383,7 @@ impl crate::CommandEncoder for super::CommandEncoder { for cat in desc.color_attachments { if let Some(cat) = cat.as_ref() { vk_clear_values.push(vk::ClearValue { - color: cat.make_vk_clear_color(), + color: unsafe { cat.make_vk_clear_color() }, }); vk_image_views.push(cat.target.view.raw); let color = super::ColorAttachmentKey { @@ -371,7 +396,7 @@ impl crate::CommandEncoder for super::CommandEncoder { rp_key.colors.push(Some(color)); fb_key.attachments.push(cat.target.view.attachment.clone()); if let Some(ref at) = cat.resolve_target { - vk_clear_values.push(mem::zeroed()); + vk_clear_values.push(unsafe { mem::zeroed() }); vk_image_views.push(at.view.raw); fb_key.attachments.push(at.view.attachment.clone()); } @@ -456,27 +481,33 @@ impl crate::CommandEncoder for super::CommandEncoder { } if let 
Some(label) = desc.label { - self.begin_debug_marker(label); + unsafe { self.begin_debug_marker(label) }; self.rpass_debug_marker_active = true; } - self.device - .raw - .cmd_set_viewport(self.active, 0, &vk_viewports); - self.device - .raw - .cmd_set_scissor(self.active, 0, &[render_area]); - self.device - .raw - .cmd_begin_render_pass(self.active, &vk_info, vk::SubpassContents::INLINE); + unsafe { + self.device + .raw + .cmd_set_viewport(self.active, 0, &vk_viewports); + self.device + .raw + .cmd_set_scissor(self.active, 0, &[render_area]); + self.device.raw.cmd_begin_render_pass( + self.active, + &vk_info, + vk::SubpassContents::INLINE, + ); + }; self.bind_point = vk::PipelineBindPoint::GRAPHICS; } unsafe fn end_render_pass(&mut self) { - self.device.raw.cmd_end_render_pass(self.active); - if self.rpass_debug_marker_active { - self.end_debug_marker(); - self.rpass_debug_marker_active = false; + unsafe { + self.device.raw.cmd_end_render_pass(self.active); + if self.rpass_debug_marker_active { + self.end_debug_marker(); + self.rpass_debug_marker_active = false; + } } } @@ -488,14 +519,16 @@ impl crate::CommandEncoder for super::CommandEncoder { dynamic_offsets: &[wgt::DynamicOffset], ) { let sets = [*group.set.raw()]; - self.device.raw.cmd_bind_descriptor_sets( - self.active, - self.bind_point, - layout.raw, - index, - &sets, - dynamic_offsets, - ); + unsafe { + self.device.raw.cmd_bind_descriptor_sets( + self.active, + self.bind_point, + layout.raw, + index, + &sets, + dynamic_offsets, + ) + }; } unsafe fn set_push_constants( &mut self, @@ -504,41 +537,45 @@ impl crate::CommandEncoder for super::CommandEncoder { offset: u32, data: &[u32], ) { - self.device.raw.cmd_push_constants( - self.active, - layout.raw, - conv::map_shader_stage(stages), - offset, - slice::from_raw_parts(data.as_ptr() as _, data.len() * 4), - ); + unsafe { + self.device.raw.cmd_push_constants( + self.active, + layout.raw, + conv::map_shader_stage(stages), + offset, + 
slice::from_raw_parts(data.as_ptr() as _, data.len() * 4), + ) + }; } unsafe fn insert_debug_marker(&mut self, label: &str) { if let Some(ext) = self.device.debug_messenger() { let cstr = self.temp.make_c_str(label); let vk_label = vk::DebugUtilsLabelEXT::builder().label_name(cstr).build(); - ext.cmd_insert_debug_utils_label(self.active, &vk_label); + unsafe { ext.cmd_insert_debug_utils_label(self.active, &vk_label) }; } } unsafe fn begin_debug_marker(&mut self, group_label: &str) { if let Some(ext) = self.device.debug_messenger() { let cstr = self.temp.make_c_str(group_label); let vk_label = vk::DebugUtilsLabelEXT::builder().label_name(cstr).build(); - ext.cmd_begin_debug_utils_label(self.active, &vk_label); + unsafe { ext.cmd_begin_debug_utils_label(self.active, &vk_label) }; } } unsafe fn end_debug_marker(&mut self) { if let Some(ext) = self.device.debug_messenger() { - ext.cmd_end_debug_utils_label(self.active); + unsafe { ext.cmd_end_debug_utils_label(self.active) }; } } unsafe fn set_render_pipeline(&mut self, pipeline: &super::RenderPipeline) { - self.device.raw.cmd_bind_pipeline( - self.active, - vk::PipelineBindPoint::GRAPHICS, - pipeline.raw, - ); + unsafe { + self.device.raw.cmd_bind_pipeline( + self.active, + vk::PipelineBindPoint::GRAPHICS, + pipeline.raw, + ) + }; } unsafe fn set_index_buffer<'a>( @@ -546,12 +583,14 @@ impl crate::CommandEncoder for super::CommandEncoder { binding: crate::BufferBinding<'a, super::Api>, format: wgt::IndexFormat, ) { - self.device.raw.cmd_bind_index_buffer( - self.active, - binding.buffer.raw, - binding.offset, - conv::map_index_format(format), - ); + unsafe { + self.device.raw.cmd_bind_index_buffer( + self.active, + binding.buffer.raw, + binding.offset, + conv::map_index_format(format), + ) + }; } unsafe fn set_vertex_buffer<'a>( &mut self, @@ -560,9 +599,11 @@ impl crate::CommandEncoder for super::CommandEncoder { ) { let vk_buffers = [binding.buffer.raw]; let vk_offsets = [binding.offset]; - self.device - .raw - 
.cmd_bind_vertex_buffers(self.active, index, &vk_buffers, &vk_offsets); + unsafe { + self.device + .raw + .cmd_bind_vertex_buffers(self.active, index, &vk_buffers, &vk_offsets) + }; } unsafe fn set_viewport(&mut self, rect: &crate::Rect, depth_range: Range) { let vk_viewports = [vk::Viewport { @@ -577,9 +618,11 @@ impl crate::CommandEncoder for super::CommandEncoder { min_depth: depth_range.start, max_depth: depth_range.end, }]; - self.device - .raw - .cmd_set_viewport(self.active, 0, &vk_viewports); + unsafe { + self.device + .raw + .cmd_set_viewport(self.active, 0, &vk_viewports) + }; } unsafe fn set_scissor_rect(&mut self, rect: &crate::Rect) { let vk_scissors = [vk::Rect2D { @@ -592,19 +635,23 @@ impl crate::CommandEncoder for super::CommandEncoder { height: rect.h, }, }]; - self.device - .raw - .cmd_set_scissor(self.active, 0, &vk_scissors); + unsafe { + self.device + .raw + .cmd_set_scissor(self.active, 0, &vk_scissors) + }; } unsafe fn set_stencil_reference(&mut self, value: u32) { - self.device.raw.cmd_set_stencil_reference( - self.active, - vk::StencilFaceFlags::FRONT_AND_BACK, - value, - ); + unsafe { + self.device.raw.cmd_set_stencil_reference( + self.active, + vk::StencilFaceFlags::FRONT_AND_BACK, + value, + ) + }; } unsafe fn set_blend_constants(&mut self, color: &[f32; 4]) { - self.device.raw.cmd_set_blend_constants(self.active, color); + unsafe { self.device.raw.cmd_set_blend_constants(self.active, color) }; } unsafe fn draw( @@ -614,13 +661,15 @@ impl crate::CommandEncoder for super::CommandEncoder { start_instance: u32, instance_count: u32, ) { - self.device.raw.cmd_draw( - self.active, - vertex_count, - instance_count, - start_vertex, - start_instance, - ); + unsafe { + self.device.raw.cmd_draw( + self.active, + vertex_count, + instance_count, + start_vertex, + start_instance, + ) + }; } unsafe fn draw_indexed( &mut self, @@ -630,14 +679,16 @@ impl crate::CommandEncoder for super::CommandEncoder { start_instance: u32, instance_count: u32, ) { - 
self.device.raw.cmd_draw_indexed( - self.active, - index_count, - instance_count, - start_index, - base_vertex, - start_instance, - ); + unsafe { + self.device.raw.cmd_draw_indexed( + self.active, + index_count, + instance_count, + start_index, + base_vertex, + start_instance, + ) + }; } unsafe fn draw_indirect( &mut self, @@ -645,13 +696,15 @@ impl crate::CommandEncoder for super::CommandEncoder { offset: wgt::BufferAddress, draw_count: u32, ) { - self.device.raw.cmd_draw_indirect( - self.active, - buffer.raw, - offset, - draw_count, - mem::size_of::() as u32, - ); + unsafe { + self.device.raw.cmd_draw_indirect( + self.active, + buffer.raw, + offset, + draw_count, + mem::size_of::() as u32, + ) + }; } unsafe fn draw_indexed_indirect( &mut self, @@ -659,13 +712,15 @@ impl crate::CommandEncoder for super::CommandEncoder { offset: wgt::BufferAddress, draw_count: u32, ) { - self.device.raw.cmd_draw_indexed_indirect( - self.active, - buffer.raw, - offset, - draw_count, - mem::size_of::() as u32, - ); + unsafe { + self.device.raw.cmd_draw_indexed_indirect( + self.active, + buffer.raw, + offset, + draw_count, + mem::size_of::() as u32, + ) + }; } unsafe fn draw_indirect_count( &mut self, @@ -678,15 +733,17 @@ impl crate::CommandEncoder for super::CommandEncoder { let stride = mem::size_of::() as u32; match self.device.extension_fns.draw_indirect_count { Some(ref t) => { - t.cmd_draw_indirect_count( - self.active, - buffer.raw, - offset, - count_buffer.raw, - count_offset, - max_count, - stride, - ); + unsafe { + t.cmd_draw_indirect_count( + self.active, + buffer.raw, + offset, + count_buffer.raw, + count_offset, + max_count, + stride, + ) + }; } None => panic!("Feature `DRAW_INDIRECT_COUNT` not enabled"), } @@ -702,15 +759,17 @@ impl crate::CommandEncoder for super::CommandEncoder { let stride = mem::size_of::() as u32; match self.device.extension_fns.draw_indirect_count { Some(ref t) => { - t.cmd_draw_indexed_indirect_count( - self.active, - buffer.raw, - offset, - 
count_buffer.raw, - count_offset, - max_count, - stride, - ); + unsafe { + t.cmd_draw_indexed_indirect_count( + self.active, + buffer.raw, + offset, + count_buffer.raw, + count_offset, + max_count, + stride, + ) + }; } None => panic!("Feature `DRAW_INDIRECT_COUNT` not enabled"), } @@ -721,34 +780,40 @@ impl crate::CommandEncoder for super::CommandEncoder { unsafe fn begin_compute_pass(&mut self, desc: &crate::ComputePassDescriptor) { self.bind_point = vk::PipelineBindPoint::COMPUTE; if let Some(label) = desc.label { - self.begin_debug_marker(label); + unsafe { self.begin_debug_marker(label) }; self.rpass_debug_marker_active = true; } } unsafe fn end_compute_pass(&mut self) { if self.rpass_debug_marker_active { - self.end_debug_marker(); + unsafe { self.end_debug_marker() }; self.rpass_debug_marker_active = false } } unsafe fn set_compute_pipeline(&mut self, pipeline: &super::ComputePipeline) { - self.device.raw.cmd_bind_pipeline( - self.active, - vk::PipelineBindPoint::COMPUTE, - pipeline.raw, - ); + unsafe { + self.device.raw.cmd_bind_pipeline( + self.active, + vk::PipelineBindPoint::COMPUTE, + pipeline.raw, + ) + }; } unsafe fn dispatch(&mut self, count: [u32; 3]) { - self.device - .raw - .cmd_dispatch(self.active, count[0], count[1], count[2]); + unsafe { + self.device + .raw + .cmd_dispatch(self.active, count[0], count[1], count[2]) + }; } unsafe fn dispatch_indirect(&mut self, buffer: &super::Buffer, offset: wgt::BufferAddress) { - self.device - .raw - .cmd_dispatch_indirect(self.active, buffer.raw, offset) + unsafe { + self.device + .raw + .cmd_dispatch_indirect(self.active, buffer.raw, offset) + } } } diff --git a/wgpu-hal/src/vulkan/conv.rs b/wgpu-hal/src/vulkan/conv.rs index ba1428ded5..e52409043a 100644 --- a/wgpu-hal/src/vulkan/conv.rs +++ b/wgpu-hal/src/vulkan/conv.rs @@ -149,16 +149,22 @@ pub fn map_vk_surface_formats(sf: vk::SurfaceFormatKHR) -> Option Tf::Bgra8Unorm, - F::B8G8R8A8_SRGB => Tf::Bgra8UnormSrgb, - F::R8G8B8A8_SNORM => Tf::Rgba8Snorm, - 
F::R8G8B8A8_UNORM => Tf::Rgba8Unorm, - F::R8G8B8A8_SRGB => Tf::Rgba8UnormSrgb, - F::R16G16B16A16_SFLOAT => Tf::Rgba16Float, - F::R16G16B16A16_SNORM => Tf::Rgba16Snorm, - F::R16G16B16A16_UNORM => Tf::Rgba16Unorm, - F::A2B10G10R10_UNORM_PACK32 => Tf::Rgb10a2Unorm, + Some(match sf.color_space { + vk::ColorSpaceKHR::SRGB_NONLINEAR => match sf.format { + F::B8G8R8A8_UNORM => Tf::Bgra8Unorm, + F::B8G8R8A8_SRGB => Tf::Bgra8UnormSrgb, + F::R8G8B8A8_SNORM => Tf::Rgba8Snorm, + F::R8G8B8A8_UNORM => Tf::Rgba8Unorm, + F::R8G8B8A8_SRGB => Tf::Rgba8UnormSrgb, + _ => return None, + }, + vk::ColorSpaceKHR::EXTENDED_SRGB_LINEAR_EXT => match sf.format { + F::R16G16B16A16_SFLOAT => Tf::Rgba16Float, + F::R16G16B16A16_SNORM => Tf::Rgba16Snorm, + F::R16G16B16A16_UNORM => Tf::Rgba16Unorm, + F::A2B10G10R10_UNORM_PACK32 => Tf::Rgb10a2Unorm, + _ => return None, + }, _ => return None, }) } diff --git a/wgpu-hal/src/vulkan/device.rs b/wgpu-hal/src/vulkan/device.rs index 65d5c70e24..084be72de9 100644 --- a/wgpu-hal/src/vulkan/device.rs +++ b/wgpu-hal/src/vulkan/device.rs @@ -49,13 +49,17 @@ impl super::DeviceShared { &buffer_vec }; - let _result = extension.debug_utils_set_object_name( - self.raw.handle(), - &vk::DebugUtilsObjectNameInfoEXT::builder() - .object_type(object_type) - .object_handle(object.as_raw()) - .object_name(CStr::from_bytes_with_nul_unchecked(name_bytes)), - ); + let name = unsafe { CStr::from_bytes_with_nul_unchecked(name_bytes) }; + + let _result = unsafe { + extension.debug_utils_set_object_name( + self.raw.handle(), + &vk::DebugUtilsObjectNameInfoEXT::builder() + .object_type(object_type) + .object_handle(object.as_raw()) + .object_name(name), + ) + }; } pub fn make_render_pass( @@ -278,13 +282,13 @@ impl super::DeviceShared { unsafe fn free_resources(&self) { for &raw in self.render_passes.lock().values() { - self.raw.destroy_render_pass(raw, None); + unsafe { self.raw.destroy_render_pass(raw, None) }; } for &raw in self.framebuffers.lock().values() { - 
self.raw.destroy_framebuffer(raw, None); + unsafe { self.raw.destroy_framebuffer(raw, None) }; } if self.handle_is_owned { - self.raw.destroy_device(None); + unsafe { self.raw.destroy_device(None) }; } } } @@ -308,7 +312,7 @@ impl gpu_alloc::MemoryDevice for super::DeviceShared { info = info.push_next(&mut info_flags); } - match self.raw.allocate_memory(&info, None) { + match unsafe { self.raw.allocate_memory(&info, None) } { Ok(memory) => Ok(memory), Err(vk::Result::ERROR_OUT_OF_DEVICE_MEMORY) => { Err(gpu_alloc::OutOfMemory::OutOfDeviceMemory) @@ -322,7 +326,7 @@ impl gpu_alloc::MemoryDevice for super::DeviceShared { } unsafe fn deallocate_memory(&self, memory: vk::DeviceMemory) { - self.raw.free_memory(memory, None); + unsafe { self.raw.free_memory(memory, None) }; } unsafe fn map_memory( @@ -331,10 +335,10 @@ impl gpu_alloc::MemoryDevice for super::DeviceShared { offset: u64, size: u64, ) -> Result, gpu_alloc::DeviceMapError> { - match self - .raw - .map_memory(*memory, offset, size, vk::MemoryMapFlags::empty()) - { + match unsafe { + self.raw + .map_memory(*memory, offset, size, vk::MemoryMapFlags::empty()) + } { Ok(ptr) => Ok(ptr::NonNull::new(ptr as *mut u8) .expect("Pointer to memory mapping must not be null")), Err(vk::Result::ERROR_OUT_OF_DEVICE_MEMORY) => { @@ -349,7 +353,7 @@ impl gpu_alloc::MemoryDevice for super::DeviceShared { } unsafe fn unmap_memory(&self, memory: &mut vk::DeviceMemory) { - self.raw.unmap_memory(*memory); + unsafe { self.raw.unmap_memory(*memory) }; } unsafe fn invalidate_memory_ranges( @@ -433,7 +437,7 @@ impl .pool_sizes(&filtered_counts) .build(); - match self.raw.create_descriptor_pool(&vk_info, None) { + match unsafe { self.raw.create_descriptor_pool(&vk_info, None) } { Ok(pool) => Ok(pool), Err(vk::Result::ERROR_OUT_OF_HOST_MEMORY) => { Err(gpu_descriptor::CreatePoolError::OutOfHostMemory) @@ -452,7 +456,7 @@ impl } unsafe fn destroy_descriptor_pool(&self, pool: vk::DescriptorPool) { - self.raw.destroy_descriptor_pool(pool, 
None) + unsafe { self.raw.destroy_descriptor_pool(pool, None) } } unsafe fn alloc_descriptor_sets<'a>( @@ -461,16 +465,18 @@ impl layouts: impl ExactSizeIterator, sets: &mut impl Extend, ) -> Result<(), gpu_descriptor::DeviceAllocationError> { - let result = self.raw.allocate_descriptor_sets( - &vk::DescriptorSetAllocateInfo::builder() - .descriptor_pool(*pool) - .set_layouts( - &smallvec::SmallVec::<[vk::DescriptorSetLayout; 32]>::from_iter( - layouts.cloned(), - ), - ) - .build(), - ); + let result = unsafe { + self.raw.allocate_descriptor_sets( + &vk::DescriptorSetAllocateInfo::builder() + .descriptor_pool(*pool) + .set_layouts( + &smallvec::SmallVec::<[vk::DescriptorSetLayout; 32]>::from_iter( + layouts.cloned(), + ), + ) + .build(), + ) + }; match result { Ok(vk_sets) => { @@ -499,10 +505,12 @@ impl pool: &mut vk::DescriptorPool, sets: impl Iterator, ) { - let result = self.raw.free_descriptor_sets( - *pool, - &smallvec::SmallVec::<[vk::DescriptorSet; 32]>::from_iter(sets), - ); + let result = unsafe { + self.raw.free_descriptor_sets( + *pool, + &smallvec::SmallVec::<[vk::DescriptorSet; 32]>::from_iter(sets), + ) + }; match result { Ok(()) => {} Err(err) => log::error!("free_descriptor_sets: {:?}", err), @@ -559,12 +567,12 @@ impl super::Device { let result = { profiling::scope!("vkCreateSwapchainKHR"); - functor.create_swapchain(&info, None) + unsafe { functor.create_swapchain(&info, None) } }; // doing this before bailing out with error if old_swapchain != vk::SwapchainKHR::null() { - functor.destroy_swapchain(old_swapchain, None) + unsafe { functor.destroy_swapchain(old_swapchain, None) } } let raw = match result { @@ -580,15 +588,11 @@ impl super::Device { } }; - let images = functor - .get_swapchain_images(raw) - .map_err(crate::DeviceError::from)?; + let images = + unsafe { functor.get_swapchain_images(raw) }.map_err(crate::DeviceError::from)?; let vk_info = vk::FenceCreateInfo::builder().build(); - let fence = self - .shared - .raw - 
.create_fence(&vk_info, None) + let fence = unsafe { self.shared.raw.create_fence(&vk_info, None) } .map_err(crate::DeviceError::from)?; Ok(super::Swapchain { @@ -741,12 +745,12 @@ impl super::Device { impl crate::Device for super::Device { unsafe fn exit(self, queue: super::Queue) { - self.mem_allocator.into_inner().cleanup(&*self.shared); - self.desc_allocator.into_inner().cleanup(&*self.shared); + unsafe { self.mem_allocator.into_inner().cleanup(&*self.shared) }; + unsafe { self.desc_allocator.into_inner().cleanup(&*self.shared) }; for &sem in queue.relay_semaphores.iter() { - self.shared.raw.destroy_semaphore(sem, None); + unsafe { self.shared.raw.destroy_semaphore(sem, None) }; } - self.shared.free_resources(); + unsafe { self.shared.free_resources() }; } unsafe fn create_buffer( @@ -758,8 +762,8 @@ impl crate::Device for super::Device { .usage(conv::map_buffer_usage(desc.usage)) .sharing_mode(vk::SharingMode::EXCLUSIVE); - let raw = self.shared.raw.create_buffer(&vk_info, None)?; - let req = self.shared.raw.get_buffer_memory_requirements(raw); + let raw = unsafe { self.shared.raw.create_buffer(&vk_info, None)? }; + let req = unsafe { self.shared.raw.get_buffer_memory_requirements(raw) }; let mut alloc_usage = if desc .usage @@ -784,23 +788,29 @@ impl crate::Device for super::Device { desc.memory_flags.contains(crate::MemoryFlags::TRANSIENT), ); - let block = self.mem_allocator.lock().alloc( - &*self.shared, - gpu_alloc::Request { - size: req.size, - align_mask: req.alignment - 1, - usage: alloc_usage, - memory_types: req.memory_type_bits & self.valid_ash_memory_types, - }, - )?; + let block = unsafe { + self.mem_allocator.lock().alloc( + &*self.shared, + gpu_alloc::Request { + size: req.size, + align_mask: req.alignment - 1, + usage: alloc_usage, + memory_types: req.memory_type_bits & self.valid_ash_memory_types, + }, + )? 
+ }; - self.shared - .raw - .bind_buffer_memory(raw, *block.memory(), block.offset())?; + unsafe { + self.shared + .raw + .bind_buffer_memory(raw, *block.memory(), block.offset())? + }; if let Some(label) = desc.label { - self.shared - .set_object_name(vk::ObjectType::BUFFER, raw, label); + unsafe { + self.shared + .set_object_name(vk::ObjectType::BUFFER, raw, label) + }; } Ok(super::Buffer { @@ -809,10 +819,12 @@ impl crate::Device for super::Device { }) } unsafe fn destroy_buffer(&self, buffer: super::Buffer) { - self.shared.raw.destroy_buffer(buffer.raw, None); - self.mem_allocator - .lock() - .dealloc(&*self.shared, buffer.block.into_inner()); + unsafe { self.shared.raw.destroy_buffer(buffer.raw, None) }; + unsafe { + self.mem_allocator + .lock() + .dealloc(&*self.shared, buffer.block.into_inner()) + }; } unsafe fn map_buffer( @@ -822,14 +834,14 @@ impl crate::Device for super::Device { ) -> Result { let size = range.end - range.start; let mut block = buffer.block.lock(); - let ptr = block.map(&*self.shared, range.start, size as usize)?; + let ptr = unsafe { block.map(&*self.shared, range.start, size as usize)? 
}; let is_coherent = block .props() .contains(gpu_alloc::MemoryPropertyFlags::HOST_COHERENT); Ok(crate::BufferMapping { ptr, is_coherent }) } unsafe fn unmap_buffer(&self, buffer: &super::Buffer) -> Result<(), crate::DeviceError> { - buffer.block.lock().unmap(&*self.shared); + unsafe { buffer.block.lock().unmap(&*self.shared) }; Ok(()) } @@ -839,12 +851,14 @@ impl crate::Device for super::Device { { let vk_ranges = self.shared.make_memory_ranges(buffer, ranges); - self.shared - .raw - .flush_mapped_memory_ranges( - &smallvec::SmallVec::<[vk::MappedMemoryRange; 32]>::from_iter(vk_ranges), - ) - .unwrap(); + unsafe { + self.shared + .raw + .flush_mapped_memory_ranges( + &smallvec::SmallVec::<[vk::MappedMemoryRange; 32]>::from_iter(vk_ranges), + ) + } + .unwrap(); } unsafe fn invalidate_mapped_ranges(&self, buffer: &super::Buffer, ranges: I) where @@ -852,12 +866,14 @@ impl crate::Device for super::Device { { let vk_ranges = self.shared.make_memory_ranges(buffer, ranges); - self.shared - .raw - .invalidate_mapped_memory_ranges( - &smallvec::SmallVec::<[vk::MappedMemoryRange; 32]>::from_iter(vk_ranges), - ) - .unwrap(); + unsafe { + self.shared + .raw + .invalidate_mapped_memory_ranges( + &smallvec::SmallVec::<[vk::MappedMemoryRange; 32]>::from_iter(vk_ranges), + ) + } + .unwrap(); } unsafe fn create_texture( @@ -896,26 +912,32 @@ impl crate::Device for super::Device { .sharing_mode(vk::SharingMode::EXCLUSIVE) .initial_layout(vk::ImageLayout::UNDEFINED); - let raw = self.shared.raw.create_image(&vk_info, None)?; - let req = self.shared.raw.get_image_memory_requirements(raw); - - let block = self.mem_allocator.lock().alloc( - &*self.shared, - gpu_alloc::Request { - size: req.size, - align_mask: req.alignment - 1, - usage: gpu_alloc::UsageFlags::FAST_DEVICE_ACCESS, - memory_types: req.memory_type_bits & self.valid_ash_memory_types, - }, - )?; + let raw = unsafe { self.shared.raw.create_image(&vk_info, None)? 
}; + let req = unsafe { self.shared.raw.get_image_memory_requirements(raw) }; + + let block = unsafe { + self.mem_allocator.lock().alloc( + &*self.shared, + gpu_alloc::Request { + size: req.size, + align_mask: req.alignment - 1, + usage: gpu_alloc::UsageFlags::FAST_DEVICE_ACCESS, + memory_types: req.memory_type_bits & self.valid_ash_memory_types, + }, + )? + }; - self.shared - .raw - .bind_image_memory(raw, *block.memory(), block.offset())?; + unsafe { + self.shared + .raw + .bind_image_memory(raw, *block.memory(), block.offset())? + }; if let Some(label) = desc.label { - self.shared - .set_object_name(vk::ObjectType::IMAGE, raw, label); + unsafe { + self.shared + .set_object_name(vk::ObjectType::IMAGE, raw, label) + }; } Ok(super::Texture { @@ -931,10 +953,10 @@ impl crate::Device for super::Device { } unsafe fn destroy_texture(&self, texture: super::Texture) { if texture.drop_guard.is_none() { - self.shared.raw.destroy_image(texture.raw, None); + unsafe { self.shared.raw.destroy_image(texture.raw, None) }; } if let Some(block) = texture.block { - self.mem_allocator.lock().dealloc(&*self.shared, block); + unsafe { self.mem_allocator.lock().dealloc(&*self.shared, block) }; } } @@ -964,11 +986,13 @@ impl crate::Device for super::Device { texture.usage }; - let raw = self.shared.raw.create_image_view(&vk_info, None)?; + let raw = unsafe { self.shared.raw.create_image_view(&vk_info, None) }?; if let Some(label) = desc.label { - self.shared - .set_object_name(vk::ObjectType::IMAGE_VIEW, raw, label); + unsafe { + self.shared + .set_object_name(vk::ObjectType::IMAGE_VIEW, raw, label) + }; } let attachment = super::FramebufferAttachment { @@ -993,12 +1017,12 @@ impl crate::Device for super::Device { let mut fbuf_lock = self.shared.framebuffers.lock(); for (key, &raw_fbuf) in fbuf_lock.iter() { if key.attachments.iter().any(|at| at.raw == view.raw) { - self.shared.raw.destroy_framebuffer(raw_fbuf, None); + unsafe { self.shared.raw.destroy_framebuffer(raw_fbuf, None) }; } } 
fbuf_lock.retain(|key, _| !key.attachments.iter().any(|at| at.raw == view.raw)); } - self.shared.raw.destroy_image_view(view.raw, None); + unsafe { self.shared.raw.destroy_image_view(view.raw, None) }; } unsafe fn create_sampler( @@ -1040,17 +1064,19 @@ impl crate::Device for super::Device { vk_info = vk_info.border_color(conv::map_border_color(color)); } - let raw = self.shared.raw.create_sampler(&vk_info, None)?; + let raw = unsafe { self.shared.raw.create_sampler(&vk_info, None)? }; if let Some(label) = desc.label { - self.shared - .set_object_name(vk::ObjectType::SAMPLER, raw, label); + unsafe { + self.shared + .set_object_name(vk::ObjectType::SAMPLER, raw, label) + }; } Ok(super::Sampler { raw }) } unsafe fn destroy_sampler(&self, sampler: super::Sampler) { - self.shared.raw.destroy_sampler(sampler.raw, None); + unsafe { self.shared.raw.destroy_sampler(sampler.raw, None) }; } unsafe fn create_command_encoder( @@ -1061,7 +1087,7 @@ impl crate::Device for super::Device { .queue_family_index(desc.queue.family_index) .flags(vk::CommandPoolCreateFlags::TRANSIENT) .build(); - let raw = self.shared.raw.create_command_pool(&vk_info, None)?; + let raw = unsafe { self.shared.raw.create_command_pool(&vk_info, None)? 
}; Ok(super::CommandEncoder { raw, @@ -1075,17 +1101,19 @@ impl crate::Device for super::Device { }) } unsafe fn destroy_command_encoder(&self, cmd_encoder: super::CommandEncoder) { - if !cmd_encoder.free.is_empty() { - self.shared - .raw - .free_command_buffers(cmd_encoder.raw, &cmd_encoder.free); - } - if !cmd_encoder.discarded.is_empty() { - self.shared - .raw - .free_command_buffers(cmd_encoder.raw, &cmd_encoder.discarded); + unsafe { + if !cmd_encoder.free.is_empty() { + self.shared + .raw + .free_command_buffers(cmd_encoder.raw, &cmd_encoder.free) + } + if !cmd_encoder.discarded.is_empty() { + self.shared + .raw + .free_command_buffers(cmd_encoder.raw, &cmd_encoder.discarded) + } + self.shared.raw.destroy_command_pool(cmd_encoder.raw, None); } - self.shared.raw.destroy_command_pool(cmd_encoder.raw, None); } unsafe fn create_bind_group_layout( @@ -1224,14 +1252,17 @@ impl crate::Device for super::Device { let vk_info = vk_info.flags(dsl_create_flags); - let raw = self - .shared - .raw - .create_descriptor_set_layout(&vk_info, None)?; + let raw = unsafe { + self.shared + .raw + .create_descriptor_set_layout(&vk_info, None)? + }; if let Some(label) = desc.label { - self.shared - .set_object_name(vk::ObjectType::DESCRIPTOR_SET_LAYOUT, raw, label); + unsafe { + self.shared + .set_object_name(vk::ObjectType::DESCRIPTOR_SET_LAYOUT, raw, label) + }; } Ok(super::BindGroupLayout { @@ -1243,9 +1274,11 @@ impl crate::Device for super::Device { }) } unsafe fn destroy_bind_group_layout(&self, bg_layout: super::BindGroupLayout) { - self.shared - .raw - .destroy_descriptor_set_layout(bg_layout.raw, None); + unsafe { + self.shared + .raw + .destroy_descriptor_set_layout(bg_layout.raw, None) + }; } unsafe fn create_pipeline_layout( @@ -1275,12 +1308,14 @@ impl crate::Device for super::Device { let raw = { profiling::scope!("vkCreatePipelineLayout"); - self.shared.raw.create_pipeline_layout(&vk_info, None)? + unsafe { self.shared.raw.create_pipeline_layout(&vk_info, None)? 
} }; if let Some(label) = desc.label { - self.shared - .set_object_name(vk::ObjectType::PIPELINE_LAYOUT, raw, label); + unsafe { + self.shared + .set_object_name(vk::ObjectType::PIPELINE_LAYOUT, raw, label) + }; } let mut binding_arrays = BTreeMap::new(); @@ -1304,31 +1339,37 @@ impl crate::Device for super::Device { }) } unsafe fn destroy_pipeline_layout(&self, pipeline_layout: super::PipelineLayout) { - self.shared - .raw - .destroy_pipeline_layout(pipeline_layout.raw, None); + unsafe { + self.shared + .raw + .destroy_pipeline_layout(pipeline_layout.raw, None) + }; } unsafe fn create_bind_group( &self, desc: &crate::BindGroupDescriptor, ) -> Result { - let mut vk_sets = self.desc_allocator.lock().allocate( - &*self.shared, - &desc.layout.raw, - if desc.layout.requires_update_after_bind { - gpu_descriptor::DescriptorSetLayoutCreateFlags::UPDATE_AFTER_BIND - } else { - gpu_descriptor::DescriptorSetLayoutCreateFlags::empty() - }, - &desc.layout.desc_count, - 1, - )?; + let mut vk_sets = unsafe { + self.desc_allocator.lock().allocate( + &*self.shared, + &desc.layout.raw, + if desc.layout.requires_update_after_bind { + gpu_descriptor::DescriptorSetLayoutCreateFlags::UPDATE_AFTER_BIND + } else { + gpu_descriptor::DescriptorSetLayoutCreateFlags::empty() + }, + &desc.layout.desc_count, + 1, + )? 
+ }; let set = vk_sets.pop().unwrap(); if let Some(label) = desc.label { - self.shared - .set_object_name(vk::ObjectType::DESCRIPTOR_SET, *set.raw(), label); + unsafe { + self.shared + .set_object_name(vk::ObjectType::DESCRIPTOR_SET, *set.raw(), label) + }; } let mut writes = Vec::with_capacity(desc.entries.len()); @@ -1397,13 +1438,15 @@ impl crate::Device for super::Device { writes.push(write.build()); } - self.shared.raw.update_descriptor_sets(&writes, &[]); + unsafe { self.shared.raw.update_descriptor_sets(&writes, &[]) }; Ok(super::BindGroup { set }) } unsafe fn destroy_bind_group(&self, group: super::BindGroup) { - self.desc_allocator - .lock() - .free(&*self.shared, Some(group.set)); + unsafe { + self.desc_allocator + .lock() + .free(&*self.shared, Some(group.set)) + }; } unsafe fn create_shader_module( @@ -1445,11 +1488,13 @@ impl crate::Device for super::Device { crate::ShaderInput::SpirV(spv) => Cow::Borrowed(spv), }; - let raw = self.create_shader_module_impl(&*spv)?; + let raw = self.create_shader_module_impl(&spv)?; if let Some(label) = desc.label { - self.shared - .set_object_name(vk::ObjectType::SHADER_MODULE, raw, label); + unsafe { + self.shared + .set_object_name(vk::ObjectType::SHADER_MODULE, raw, label) + }; } Ok(super::ShaderModule::Raw(raw)) @@ -1457,7 +1502,7 @@ impl crate::Device for super::Device { unsafe fn destroy_shader_module(&self, module: super::ShaderModule) { match module { super::ShaderModule::Raw(raw) => { - self.shared.raw.destroy_shader_module(raw, None); + unsafe { self.shared.raw.destroy_shader_module(raw, None) }; } super::ShaderModule::Intermediate { .. } => {} } @@ -1674,33 +1719,37 @@ impl crate::Device for super::Device { let mut raw_vec = { profiling::scope!("vkCreateGraphicsPipelines"); - self.shared - .raw - .create_graphics_pipelines(vk::PipelineCache::null(), &vk_infos, None) - .map_err(|(_, e)| crate::DeviceError::from(e))? 
+ unsafe { + self.shared + .raw + .create_graphics_pipelines(vk::PipelineCache::null(), &vk_infos, None) + .map_err(|(_, e)| crate::DeviceError::from(e)) + }? }; let raw = raw_vec.pop().unwrap(); if let Some(label) = desc.label { - self.shared - .set_object_name(vk::ObjectType::PIPELINE, raw, label); + unsafe { + self.shared + .set_object_name(vk::ObjectType::PIPELINE, raw, label) + }; } if let Some(raw_module) = compiled_vs.temp_raw_module { - self.shared.raw.destroy_shader_module(raw_module, None); + unsafe { self.shared.raw.destroy_shader_module(raw_module, None) }; } if let Some(CompiledStage { temp_raw_module: Some(raw_module), .. }) = compiled_fs { - self.shared.raw.destroy_shader_module(raw_module, None); + unsafe { self.shared.raw.destroy_shader_module(raw_module, None) }; } Ok(super::RenderPipeline { raw }) } unsafe fn destroy_render_pipeline(&self, pipeline: super::RenderPipeline) { - self.shared.raw.destroy_pipeline(pipeline.raw, None); + unsafe { self.shared.raw.destroy_pipeline(pipeline.raw, None) }; } unsafe fn create_compute_pipeline( @@ -1722,26 +1771,30 @@ impl crate::Device for super::Device { let mut raw_vec = { profiling::scope!("vkCreateComputePipelines"); - self.shared - .raw - .create_compute_pipelines(vk::PipelineCache::null(), &vk_infos, None) - .map_err(|(_, e)| crate::DeviceError::from(e))? + unsafe { + self.shared + .raw + .create_compute_pipelines(vk::PipelineCache::null(), &vk_infos, None) + .map_err(|(_, e)| crate::DeviceError::from(e)) + }? 
}; let raw = raw_vec.pop().unwrap(); if let Some(label) = desc.label { - self.shared - .set_object_name(vk::ObjectType::PIPELINE, raw, label); + unsafe { + self.shared + .set_object_name(vk::ObjectType::PIPELINE, raw, label) + }; } if let Some(raw_module) = compiled.temp_raw_module { - self.shared.raw.destroy_shader_module(raw_module, None); + unsafe { self.shared.raw.destroy_shader_module(raw_module, None) }; } Ok(super::ComputePipeline { raw }) } unsafe fn destroy_compute_pipeline(&self, pipeline: super::ComputePipeline) { - self.shared.raw.destroy_pipeline(pipeline.raw, None); + unsafe { self.shared.raw.destroy_pipeline(pipeline.raw, None) }; } unsafe fn create_query_set( @@ -1769,16 +1822,18 @@ impl crate::Device for super::Device { .pipeline_statistics(pipeline_statistics) .build(); - let raw = self.shared.raw.create_query_pool(&vk_info, None)?; + let raw = unsafe { self.shared.raw.create_query_pool(&vk_info, None) }?; if let Some(label) = desc.label { - self.shared - .set_object_name(vk::ObjectType::QUERY_POOL, raw, label); + unsafe { + self.shared + .set_object_name(vk::ObjectType::QUERY_POOL, raw, label) + }; } Ok(super::QuerySet { raw }) } unsafe fn destroy_query_set(&self, set: super::QuerySet) { - self.shared.raw.destroy_query_pool(set.raw, None); + unsafe { self.shared.raw.destroy_query_pool(set.raw, None) }; } unsafe fn create_fence(&self) -> Result { @@ -1786,7 +1841,7 @@ impl crate::Device for super::Device { let mut sem_type_info = vk::SemaphoreTypeCreateInfo::builder().semaphore_type(vk::SemaphoreType::TIMELINE); let vk_info = vk::SemaphoreCreateInfo::builder().push_next(&mut sem_type_info); - let raw = self.shared.raw.create_semaphore(&vk_info, None)?; + let raw = unsafe { self.shared.raw.create_semaphore(&vk_info, None) }?; super::Fence::TimelineSemaphore(raw) } else { super::Fence::FencePool { @@ -1799,7 +1854,7 @@ impl crate::Device for super::Device { unsafe fn destroy_fence(&self, fence: super::Fence) { match fence { 
super::Fence::TimelineSemaphore(raw) => { - self.shared.raw.destroy_semaphore(raw, None); + unsafe { self.shared.raw.destroy_semaphore(raw, None) }; } super::Fence::FencePool { active, @@ -1807,10 +1862,10 @@ impl crate::Device for super::Device { last_completed: _, } => { for (_, raw) in active { - self.shared.raw.destroy_fence(raw, None); + unsafe { self.shared.raw.destroy_fence(raw, None) }; } for raw in free { - self.shared.raw.destroy_fence(raw, None); + unsafe { self.shared.raw.destroy_fence(raw, None) }; } } } @@ -1839,12 +1894,12 @@ impl crate::Device for super::Device { .semaphores(&semaphores) .values(&values); let result = match self.shared.extension_fns.timeline_semaphore { - Some(super::ExtensionFn::Extension(ref ext)) => { + Some(super::ExtensionFn::Extension(ref ext)) => unsafe { ext.wait_semaphores(&vk_info, timeout_ns) - } - Some(super::ExtensionFn::Promoted) => { + }, + Some(super::ExtensionFn::Promoted) => unsafe { self.shared.raw.wait_semaphores(&vk_info, timeout_ns) - } + }, None => unreachable!(), }; match result { @@ -1863,7 +1918,9 @@ impl crate::Device for super::Device { } else { match active.iter().find(|&&(value, _)| value >= wait_value) { Some(&(_, raw)) => { - match self.shared.raw.wait_for_fences(&[raw], true, timeout_ns) { + match unsafe { + self.shared.raw.wait_for_fences(&[raw], true, timeout_ns) + } { Ok(()) => Ok(true), Err(vk::Result::TIMEOUT) => Ok(false), Err(other) => Err(other.into()), @@ -1885,9 +1942,11 @@ impl crate::Device for super::Device { // Renderdoc requires us to give us the pointer that vkInstance _points to_. 
let raw_vk_instance = ash::vk::Handle::as_raw(self.shared.instance.raw.handle()) as *mut *mut _; - let raw_vk_instance_dispatch_table = *raw_vk_instance; - self.render_doc - .start_frame_capture(raw_vk_instance_dispatch_table, ptr::null_mut()) + let raw_vk_instance_dispatch_table = unsafe { *raw_vk_instance }; + unsafe { + self.render_doc + .start_frame_capture(raw_vk_instance_dispatch_table, ptr::null_mut()) + } } #[cfg(not(feature = "renderdoc"))] false @@ -1898,10 +1957,12 @@ impl crate::Device for super::Device { // Renderdoc requires us to give us the pointer that vkInstance _points to_. let raw_vk_instance = ash::vk::Handle::as_raw(self.shared.instance.raw.handle()) as *mut *mut _; - let raw_vk_instance_dispatch_table = *raw_vk_instance; + let raw_vk_instance_dispatch_table = unsafe { *raw_vk_instance }; - self.render_doc - .end_frame_capture(raw_vk_instance_dispatch_table, ptr::null_mut()) + unsafe { + self.render_doc + .end_frame_capture(raw_vk_instance_dispatch_table, ptr::null_mut()) + } } } } diff --git a/wgpu-hal/src/vulkan/instance.rs b/wgpu-hal/src/vulkan/instance.rs index 19d7684eb2..0f828e3a26 100644 --- a/wgpu-hal/src/vulkan/instance.rs +++ b/wgpu-hal/src/vulkan/instance.rs @@ -31,17 +31,17 @@ unsafe extern "system" fn debug_utils_messenger_callback( _ => log::Level::Warn, }; - let cd = &*callback_data_ptr; + let cd = unsafe { &*callback_data_ptr }; let message_id_name = if cd.p_message_id_name.is_null() { Cow::from("") } else { - CStr::from_ptr(cd.p_message_id_name).to_string_lossy() + unsafe { CStr::from_ptr(cd.p_message_id_name) }.to_string_lossy() }; let message = if cd.p_message.is_null() { Cow::from("") } else { - CStr::from_ptr(cd.p_message).to_string_lossy() + unsafe { CStr::from_ptr(cd.p_message) }.to_string_lossy() }; let _ = std::panic::catch_unwind(|| { @@ -56,14 +56,13 @@ unsafe extern "system" fn debug_utils_messenger_callback( }); if cd.queue_label_count != 0 { - let labels = slice::from_raw_parts(cd.p_queue_labels, 
cd.queue_label_count as usize); + let labels = + unsafe { slice::from_raw_parts(cd.p_queue_labels, cd.queue_label_count as usize) }; let names = labels .iter() .flat_map(|dul_obj| { - dul_obj - .p_label_name - .as_ref() - .map(|lbl| CStr::from_ptr(lbl).to_string_lossy()) + unsafe { dul_obj.p_label_name.as_ref() } + .map(|lbl| unsafe { CStr::from_ptr(lbl) }.to_string_lossy()) }) .collect::>(); @@ -73,14 +72,13 @@ unsafe extern "system" fn debug_utils_messenger_callback( } if cd.cmd_buf_label_count != 0 { - let labels = slice::from_raw_parts(cd.p_cmd_buf_labels, cd.cmd_buf_label_count as usize); + let labels = + unsafe { slice::from_raw_parts(cd.p_cmd_buf_labels, cd.cmd_buf_label_count as usize) }; let names = labels .iter() .flat_map(|dul_obj| { - dul_obj - .p_label_name - .as_ref() - .map(|lbl| CStr::from_ptr(lbl).to_string_lossy()) + unsafe { dul_obj.p_label_name.as_ref() } + .map(|lbl| unsafe { CStr::from_ptr(lbl) }.to_string_lossy()) }) .collect::>(); @@ -90,15 +88,13 @@ unsafe extern "system" fn debug_utils_messenger_callback( } if cd.object_count != 0 { - let labels = slice::from_raw_parts(cd.p_objects, cd.object_count as usize); + let labels = unsafe { slice::from_raw_parts(cd.p_objects, cd.object_count as usize) }; //TODO: use color fields of `vk::DebugUtilsLabelExt`? 
let names = labels .iter() .map(|obj_info| { - let name = obj_info - .p_object_name - .as_ref() - .map(|name| CStr::from_ptr(name).to_string_lossy()) + let name = unsafe { obj_info.p_object_name.as_ref() } + .map(|name| unsafe { CStr::from_ptr(name) }.to_string_lossy()) .unwrap_or(Cow::Borrowed("?")); format!( @@ -125,9 +121,9 @@ impl super::Swapchain { profiling::scope!("Swapchain::release_resources"); { profiling::scope!("vkDeviceWaitIdle"); - let _ = device.device_wait_idle(); + let _ = unsafe { device.device_wait_idle() }; }; - device.destroy_fence(self.fence, None); + unsafe { device.destroy_fence(self.fence, None) }; self } } @@ -256,9 +252,8 @@ impl super::Instance { | vk::DebugUtilsMessageTypeFlagsEXT::PERFORMANCE, ) .pfn_user_callback(Some(debug_utils_messenger_callback)); - let messenger = extension - .create_debug_utils_messenger(&vk_info, None) - .unwrap(); + let messenger = + unsafe { extension.create_debug_utils_messenger(&vk_info, None) }.unwrap(); Some(super::DebugUtils { extension, messenger, @@ -484,7 +479,7 @@ impl crate::Instance for super::Instance { unsafe fn init(desc: &crate::InstanceDescriptor) -> Result { use crate::auxil::cstr_from_bytes_until_nul; - let entry = match ash::Entry::load() { + let entry = match unsafe { ash::Entry::load() } { Ok(entry) => entry, Err(err) => { log::info!("Missing Vulkan entry points: {:?}", err); @@ -595,22 +590,24 @@ impl crate::Instance for super::Instance { .enabled_layer_names(&str_pointers[..layers.len()]) .enabled_extension_names(&str_pointers[layers.len()..]); - entry.create_instance(&create_info, None).map_err(|e| { + unsafe { entry.create_instance(&create_info, None) }.map_err(|e| { log::warn!("create_instance: {:?}", e); crate::InstanceError })? 
}; - Self::from_raw( - entry, - vk_instance, - driver_api_version, - android_sdk_version, - extensions, - desc.flags, - has_nv_optimus, - Some(Box::new(())), // `Some` signals that wgpu-hal is in charge of destroying vk_instance - ) + unsafe { + Self::from_raw( + entry, + vk_instance, + driver_api_version, + android_sdk_version, + extensions, + desc.flags, + has_nv_optimus, + Some(Box::new(())), // `Some` signals that wgpu-hal is in charge of destroying vk_instance + ) + } } unsafe fn create_surface( @@ -635,7 +632,7 @@ impl crate::Instance for super::Instance { (Rwh::Win32(handle), _) => { use winapi::um::libloaderapi::GetModuleHandleW; - let hinstance = GetModuleHandleW(std::ptr::null()); + let hinstance = unsafe { GetModuleHandleW(std::ptr::null()) }; self.create_surface_from_hwnd(hinstance as *mut _, handle.hwnd) } #[cfg(target_os = "macos")] @@ -655,13 +652,13 @@ impl crate::Instance for super::Instance { } unsafe fn destroy_surface(&self, surface: super::Surface) { - surface.functor.destroy_surface(surface.raw, None); + unsafe { surface.functor.destroy_surface(surface.raw, None) }; } unsafe fn enumerate_adapters(&self) -> Vec> { use crate::auxil::db; - let raw_devices = match self.shared.raw.enumerate_physical_devices() { + let raw_devices = match unsafe { self.shared.raw.enumerate_physical_devices() } { Ok(devices) => devices, Err(err) => { log::error!("enumerate_adapters: {}", err); @@ -708,9 +705,9 @@ impl crate::Surface for super::Surface { let old = self .swapchain .take() - .map(|sc| sc.release_resources(&device.shared.raw)); + .map(|sc| unsafe { sc.release_resources(&device.shared.raw) }); - let swapchain = device.create_swapchain(self, config, old)?; + let swapchain = unsafe { device.create_swapchain(self, config, old)? 
}; self.swapchain = Some(swapchain); Ok(()) @@ -718,8 +715,8 @@ impl crate::Surface for super::Surface { unsafe fn unconfigure(&mut self, device: &super::Device) { if let Some(sc) = self.swapchain.take() { - let swapchain = sc.release_resources(&device.shared.raw); - swapchain.functor.destroy_swapchain(swapchain.raw, None); + let swapchain = unsafe { sc.release_resources(&device.shared.raw) }; + unsafe { swapchain.functor.destroy_swapchain(swapchain.raw, None) }; } } @@ -748,23 +745,22 @@ impl crate::Surface for super::Surface { } // will block if no image is available - let (index, suboptimal) = - match sc - .functor + let (index, suboptimal) = match unsafe { + sc.functor .acquire_next_image(sc.raw, timeout_ns, vk::Semaphore::null(), sc.fence) - { - Ok(pair) => pair, - Err(error) => { - return match error { - vk::Result::TIMEOUT => Ok(None), - vk::Result::NOT_READY | vk::Result::ERROR_OUT_OF_DATE_KHR => { - Err(crate::SurfaceError::Outdated) - } - vk::Result::ERROR_SURFACE_LOST_KHR => Err(crate::SurfaceError::Lost), - other => Err(crate::DeviceError::from(other).into()), + } { + Ok(pair) => pair, + Err(error) => { + return match error { + vk::Result::TIMEOUT => Ok(None), + vk::Result::NOT_READY | vk::Result::ERROR_OUT_OF_DATE_KHR => { + Err(crate::SurfaceError::Outdated) } + vk::Result::ERROR_SURFACE_LOST_KHR => Err(crate::SurfaceError::Lost), + other => Err(crate::DeviceError::from(other).into()), } - }; + } + }; // special case for Intel Vulkan returning bizzare values (ugh) if sc.device.vendor_id == crate::auxil::db::intel::VENDOR && index > 0x100 { @@ -773,14 +769,9 @@ impl crate::Surface for super::Surface { let fences = &[sc.fence]; - sc.device - .raw - .wait_for_fences(fences, true, !0) - .map_err(crate::DeviceError::from)?; - sc.device - .raw - .reset_fences(fences) + unsafe { sc.device.raw.wait_for_fences(fences, true, !0) } .map_err(crate::DeviceError::from)?; + unsafe { sc.device.raw.reset_fences(fences) }.map_err(crate::DeviceError::from)?; let texture 
= super::SurfaceTexture { index, diff --git a/wgpu-hal/src/vulkan/mod.rs b/wgpu-hal/src/vulkan/mod.rs index fd1266de37..f8ffbc718e 100644 --- a/wgpu-hal/src/vulkan/mod.rs +++ b/wgpu-hal/src/vulkan/mod.rs @@ -589,10 +589,11 @@ impl crate::Queue for Queue { } => { fence_raw = match free.pop() { Some(raw) => raw, - None => self - .device - .raw - .create_fence(&vk::FenceCreateInfo::builder(), None)?, + None => unsafe { + self.device + .raw + .create_fence(&vk::FenceCreateInfo::builder(), None)? + }, }; active.push((value, fence_raw)); } @@ -620,9 +621,11 @@ impl crate::Queue for Queue { vk_info = vk_info.signal_semaphores(&signal_semaphores[..signal_count]); profiling::scope!("vkQueueSubmit"); - self.device - .raw - .queue_submit(self.raw, &[vk_info.build()], fence_raw)?; + unsafe { + self.device + .raw + .queue_submit(self.raw, &[vk_info.build()], fence_raw)? + }; Ok(()) } @@ -645,13 +648,13 @@ impl crate::Queue for Queue { let suboptimal = { profiling::scope!("vkQueuePresentKHR"); - self.swapchain_fn - .queue_present(self.raw, &vk_info) - .map_err(|error| match error { + unsafe { self.swapchain_fn.queue_present(self.raw, &vk_info) }.map_err(|error| { + match error { vk::Result::ERROR_OUT_OF_DATE_KHR => crate::SurfaceError::Outdated, vk::Result::ERROR_SURFACE_LOST_KHR => crate::SurfaceError::Lost, _ => crate::DeviceError::from(error).into(), - })? + } + })? 
}; if suboptimal { log::warn!("Suboptimal present of frame {}", texture.index); diff --git a/wgpu-info/Cargo.toml b/wgpu-info/Cargo.toml index 84650d1b7a..073461b8df 100644 --- a/wgpu-info/Cargo.toml +++ b/wgpu-info/Cargo.toml @@ -1,14 +1,15 @@ [package] name = "wgpu-info" -version = "0.14.0" -authors = ["wgpu developers"] -edition = "2021" +version.workspace = true +authors.workspace = true +edition.workspace = true description = "Adapter information and per-adapter test program" -homepage = "https://github.com/gfx-rs/wgpu" -repository = "https://github.com/gfx-rs/wgpu" -keywords = ["graphics"] -license = "MIT OR Apache-2.0" +homepage.workspace = true +repository.workspace = true +keywords.workspace = true +license.workspace = true +publish = false [dependencies] -env_logger = "0.9" -wgpu = { version = "0.14", path = "../wgpu" } +env_logger.workspace = true +wgpu.workspace = true diff --git a/wgpu-info/src/main.rs b/wgpu-info/src/main.rs index 00d5e8c0f1..9d477e7cd5 100644 --- a/wgpu-info/src/main.rs +++ b/wgpu-info/src/main.rs @@ -132,7 +132,7 @@ mod inner { let downlevel = adapter.get_downlevel_capabilities(); let features = adapter.features(); let limits = adapter.limits(); - + println!("Adapter {}:", idx); println!("\t Backend: {:?}", info.backend); println!("\t Name: {:?}", info.name); @@ -140,7 +140,7 @@ mod inner { println!("\t DeviceID: {:?}", info.device); println!("\t Type: {:?}", info.device_type); println!("\t Driver: {:?}", info.driver); - println!("\tDriverInfo: {:?}", info.driver); + println!("\tDriverInfo: {:?}", info.driver_info); println!("\t Compliant: {:?}", downlevel.is_webgpu_compliant()); println!("\tFeatures:"); for i in 0..(size_of::() * 8) { @@ -151,7 +151,7 @@ mod inner { } } } - + println!("\tLimits:"); let wgpu::Limits { max_texture_dimension_1d, @@ -213,7 +213,7 @@ mod inner { println!("\t\t Max Compute Workgroup Size Y: {}", max_compute_workgroup_size_y); println!("\t\t Max Compute Workgroup Size Z: {}", 
max_compute_workgroup_size_z); println!("\t\t Max Compute Workgroups Per Dimension: {}", max_compute_workgroups_per_dimension); - + println!("\tDownlevel Properties:"); let wgpu::DownlevelCapabilities { shader_model, @@ -230,7 +230,7 @@ mod inner { } } - println!("\tTexture Format Features: ┌──────────┬──────────┬──────────Allowed┬Usages───────────┬───────────────────┐ ┌────────────┬─────────────┬──────────────Feature┬Flags───────────────┬─────────────────┐"); + println!("\tTexture Format Features: ┌──────────┬──────────┬──────────Allowed┬Usages───────────┬───────────────────┐ ┌────────────┬────────────────┬──────────────Feature┬Flags──────┬─────────────────────┬────────────────────┬─"); for format in TEXTURE_FORMAT_LIST { let features = adapter.get_texture_format_features(format); let format_name = match format { @@ -271,9 +271,10 @@ mod inner { } } } + println!(" │"); } - println!("\t └──────────┴──────────┴─────────────────┴─────────────────┴───────────────────┘ └────────────┴─────────────┴─────────────────────┴────────────────────┴─────────────────┘"); + println!("\t └──────────┴──────────┴─────────────────┴─────────────────┴───────────────────┘ └────────────┴────────────────┴────────────────┴────────────────┴─────────────────────┘"); } pub fn main() { diff --git a/wgpu-types/Cargo.toml b/wgpu-types/Cargo.toml index e9ae58b990..0748296a81 100644 --- a/wgpu-types/Cargo.toml +++ b/wgpu-types/Cargo.toml @@ -1,24 +1,27 @@ [package] name = "wgpu-types" -version = "0.14.0" -authors = ["wgpu developers"] -edition = "2021" +version.workspace = true +authors.workspace = true +edition.workspace = true description = "WebGPU types" -homepage = "https://github.com/gfx-rs/wgpu" -repository = "https://github.com/gfx-rs/wgpu" -keywords = ["graphics"] -license = "MIT OR Apache-2.0" +homepage.workspace = true +repository.workspace = true +keywords.workspace = true +license.workspace = true + +[package.metadata.docs.rs] +all-features = true +rustdoc-args = 
["--cfg", "docsrs"] [lib] [features] -trace = ["serde", "bitflags_serde_shim"] -replay = ["serde", "bitflags_serde_shim"] +trace = ["serde"] +replay = ["serde"] [dependencies] -bitflags = "1.0" -serde = { version = "1.0", features = ["serde_derive"], optional = true } -bitflags_serde_shim = { version = "0.2", optional = true } +bitflags.workspace = true +serde = { workspace = true, features = ["serde_derive"], optional = true } [dev-dependencies] -serde_json = "1.0.85" +serde_json.workspace = true diff --git a/wgpu-types/src/lib.rs b/wgpu-types/src/lib.rs index ed82c00ded..82d5708c12 100644 --- a/wgpu-types/src/lib.rs +++ b/wgpu-types/src/lib.rs @@ -2,17 +2,60 @@ * This API is used for targeting both Web and Native. */ +#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] #![allow( // We don't use syntax sugar where it's not necessary. clippy::match_like_matches_macro, )] -#![warn(missing_docs)] +#![warn(missing_docs, unsafe_op_in_unsafe_fn)] #[cfg(feature = "serde")] use serde::{Deserialize, Serialize}; use std::hash::{Hash, Hasher}; use std::{num::NonZeroU32, ops::Range}; +// Use this macro instead of the one provided by the bitflags_serde_shim crate +// because the latter produces an error when deserializing bits that are not +// specified in the bitflags, while we want deserialization to succeed and +// and unspecified bits to lead to errors handled in wgpu-core. +// Note that plainly deriving Serialize and Deserialized would have a similar +// behavior to this macro (unspecified bit do not produce an error). +macro_rules! 
impl_bitflags { + ($name:ident) => { + #[cfg(feature = "trace")] + impl serde::Serialize for $name { + fn serialize(&self, serializer: S) -> Result + where + S: serde::Serializer, + { + self.bits().serialize(serializer) + } + } + + #[cfg(feature = "replay")] + impl<'de> serde::Deserialize<'de> for $name { + fn deserialize(deserializer: D) -> Result<$name, D::Error> + where + D: serde::Deserializer<'de>, + { + let value = <_ as serde::Deserialize<'de>>::deserialize(deserializer)?; + // Note: newer version of bitflags replaced from_bits_unchecked with + // from_bits_retain which is not marked as unsafe (same implementation). + Ok(unsafe { $name::from_bits_unchecked(value) }) + } + } + + impl $name { + /// Returns true if the bitflags contains bits that are not part of + /// the bitflags definition. + pub fn contains_invalid_bits(&self) -> bool { + let all = Self::all().bits(); + (self.bits() | all) != all + } + } + }; +} + /// Integral type used for buffer offsets. pub type BufferAddress = u64; /// Integral type used for buffer slice sizes. @@ -114,8 +157,7 @@ bitflags::bitflags! { } } -#[cfg(feature = "bitflags_serde_shim")] -bitflags_serde_shim::impl_serde_for_bitflags!(Backends); +impl_bitflags!(Backends); impl From for Backends { fn from(backend: Backend) -> Self { @@ -187,15 +229,6 @@ bitflags::bitflags! { /// /// This is a web and native feature. const DEPTH_CLIP_CONTROL = 1 << 0; - /// Allows for explicit creation of textures of format [`TextureFormat::Depth24PlusStencil8`] - /// - /// Supported platforms: - /// - Vulkan (some) - /// - DX12 - /// - Metal (Macs with amd GPUs) - /// - /// This is a web and native feature. - const DEPTH24PLUS_STENCIL8 = 1 << 1; /// Allows for explicit creation of textures of format [`TextureFormat::Depth32FloatStencil8`] /// /// Supported platforms: @@ -316,7 +349,9 @@ bitflags::bitflags! { /// the consequences. 
/// /// Supported platforms: - /// - All + /// - Vulkan + /// - DX12 + /// - Metal /// /// This is a native only feature. const MAPPABLE_PRIMARY_BUFFERS = 1 << 16; @@ -586,6 +621,7 @@ bitflags::bitflags! { /// /// Supported platforms: /// - Vulkan + /// - OpenGL (web only) /// /// This is a native only feature. const MULTIVIEW = 1 << 37; @@ -638,8 +674,7 @@ bitflags::bitflags! { } } -#[cfg(feature = "bitflags_serde_shim")] -bitflags_serde_shim::impl_serde_for_bitflags!(Features); +impl_bitflags!(Features); impl Features { /// Mask of all features which are part of the upstream WebGPU standard. @@ -1061,7 +1096,7 @@ bitflags::bitflags! { /// Supports samplers with anisotropic filtering. Note this isn't actually required by /// WebGPU, the implementation is allowed to completely ignore aniso clamp. This flag is - /// here for native backends so they can comunicate to the user of aniso is enabled. + /// here for native backends so they can communicate to the user of aniso is enabled. /// /// All backends and all devices support anisotropic filtering. const ANISOTROPIC_FILTERING = 1 << 10; @@ -1085,11 +1120,17 @@ bitflags::bitflags! { /// /// WebGL doesn't support this. const BUFFER_BINDINGS_NOT_16_BYTE_ALIGNED = 1 << 15; + + /// Supports buffers to combine [`BufferUsages::INDEX`] with usages other than [`BufferUsages::COPY_DST`] and [`BufferUsages::COPY_SRC`]. + /// Furthermore, in absence of this feature it is not allowed to copy index buffers from/to buffers with a set of usage flags containing + /// [`BufferUsages::VERTEX`]/[`BufferUsages::UNIFORM`]/[`BufferUsages::STORAGE`] or [`BufferUsages::INDIRECT`]. + /// + /// WebGL doesn't support this. + const UNRESTRICTED_INDEX_BUFFER = 1 << 16; } } -#[cfg(feature = "bitflags_serde_shim")] -bitflags_serde_shim::impl_serde_for_bitflags!(DownlevelFlags); +impl_bitflags!(DownlevelFlags); impl DownlevelFlags { /// All flags that indicate if the backend is WebGPU compliant @@ -1211,8 +1252,7 @@ bitflags::bitflags! 
{ } } -#[cfg(feature = "bitflags_serde_shim")] -bitflags_serde_shim::impl_serde_for_bitflags!(ShaderStages); +impl_bitflags!(ShaderStages); /// Dimensions of a particular texture view. /// @@ -1627,24 +1667,44 @@ bitflags::bitflags! { /// If not present, the texture can't be sampled with a filtering sampler. /// This may overwrite TextureSampleType::Float.filterable const FILTERABLE = 1 << 0; - /// Allows [`TextureDescriptor::sample_count`] greater than `1`. - const MULTISAMPLE = 1 << 1; + /// Allows [`TextureDescriptor::sample_count`] to be `2`. + const MULTISAMPLE_X2 = 1 << 1; + /// Allows [`TextureDescriptor::sample_count`] to be `4`. + const MULTISAMPLE_X4 = 1 << 2 ; + /// Allows [`TextureDescriptor::sample_count`] to be `8`. + const MULTISAMPLE_X8 = 1 << 3 ; /// Allows a texture of this format to back a view passed as `resolve_target` /// to a render pass for an automatic driver-implemented resolve. - const MULTISAMPLE_RESOLVE = 1 << 2; + const MULTISAMPLE_RESOLVE = 1 << 4; /// When used as a STORAGE texture, then a texture with this format can be bound with /// [`StorageTextureAccess::ReadOnly`] or [`StorageTextureAccess::ReadWrite`]. - const STORAGE_READ_WRITE = 1 << 3; + const STORAGE_READ_WRITE = 1 << 5; /// When used as a STORAGE texture, then a texture with this format can be written to with atomics. // TODO: No access flag exposed as of writing - const STORAGE_ATOMICS = 1 << 4; + const STORAGE_ATOMICS = 1 << 6; /// If not present, the texture can't be blended into the render target. - const BLENDABLE = 1 << 5; + const BLENDABLE = 1 << 7; } } -#[cfg(feature = "bitflags_serde_shim")] -bitflags_serde_shim::impl_serde_for_bitflags!(TextureFormatFeatureFlags); +impl TextureFormatFeatureFlags { + /// Sample count supported by a given texture format. + /// + /// returns `true` if `count` is a supported sample count. 
+ pub fn sample_count_supported(&self, count: u32) -> bool { + use TextureFormatFeatureFlags as tfsc; + + match count { + 1 => true, + 2 => self.contains(tfsc::MULTISAMPLE_X2), + 4 => self.contains(tfsc::MULTISAMPLE_X4), + 8 => self.contains(tfsc::MULTISAMPLE_X8), + _ => false, + } + } +} + +impl_bitflags!(TextureFormatFeatureFlags); /// Features supported by a given texture format /// @@ -2289,7 +2349,6 @@ impl TextureFormat { let astc_hdr = Features::TEXTURE_COMPRESSION_ASTC_HDR; let norm16bit = Features::TEXTURE_FORMAT_16BIT_NORM; let d32_s8 = Features::DEPTH32FLOAT_STENCIL8; - let d24_s8 = Features::DEPTH24PLUS_STENCIL8; // Sample Types let uint = TextureSampleType::Uint; @@ -2307,9 +2366,9 @@ impl TextureFormat { // Multisampling let noaa = TextureFormatFeatureFlags::empty(); - let msaa = TextureFormatFeatureFlags::MULTISAMPLE; - let msaa_resolve = - TextureFormatFeatureFlags::MULTISAMPLE | TextureFormatFeatureFlags::MULTISAMPLE_RESOLVE; + let msaa = TextureFormatFeatureFlags::MULTISAMPLE_X4; + let msaa_resolve = TextureFormatFeatureFlags::MULTISAMPLE_X4 + | TextureFormatFeatureFlags::MULTISAMPLE_RESOLVE; // Flags let basic = @@ -2374,7 +2433,7 @@ impl TextureFormat { // Depth-stencil textures Self::Depth16Unorm => ( native, depth, linear, msaa, (1, 1), 2, attachment, 1), Self::Depth24Plus => ( native, depth, linear, msaa, (1, 1), 4, attachment, 1), - Self::Depth24PlusStencil8 => ( d24_s8, depth, linear, msaa, (1, 1), 4, attachment, 2), + Self::Depth24PlusStencil8 => ( native, depth, linear, msaa, (1, 1), 4, attachment, 2), Self::Depth32Float => ( native, depth, linear, msaa, (1, 1), 4, attachment, 1), Self::Depth32FloatStencil8 =>( d32_s8, depth, linear, msaa, (1, 1), 4, attachment, 2), // Packed uncompressed @@ -3063,8 +3122,7 @@ bitflags::bitflags! 
{ } } -#[cfg(feature = "bitflags_serde_shim")] -bitflags_serde_shim::impl_serde_for_bitflags!(ColorWrites); +impl_bitflags!(ColorWrites); impl Default for ColorWrites { fn default() -> Self { @@ -3622,8 +3680,7 @@ bitflags::bitflags! { } } -#[cfg(feature = "bitflags_serde_shim")] -bitflags_serde_shim::impl_serde_for_bitflags!(BufferUsages); +impl_bitflags!(BufferUsages); /// Describes a [`Buffer`](../wgpu/struct.Buffer.html). /// @@ -3761,7 +3818,7 @@ pub enum PresentMode { Mailbox = 5, } -/// Specifies how the alpha channel of the textures should be handled during (martin mouv i step) +/// Specifies how the alpha channel of the textures should be handled during /// compositing. #[repr(C)] #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] @@ -3824,8 +3881,34 @@ bitflags::bitflags! { } } -#[cfg(feature = "bitflags_serde_shim")] -bitflags_serde_shim::impl_serde_for_bitflags!(TextureUsages); +impl_bitflags!(TextureUsages); + +/// Defines the capabilities of a given surface and adapter. +#[derive(Debug)] +pub struct SurfaceCapabilities { + /// List of supported formats to use with the given adapter. The first format in the vector is preferred. + /// + /// Returns an empty vector if the surface is incompatible with the adapter. + pub formats: Vec, + /// List of supported presentation modes to use with the given adapter. + /// + /// Returns an empty vector if the surface is incompatible with the adapter. + pub present_modes: Vec, + /// List of supported alpha modes to use with the given adapter. + /// + /// Will return at least one element, CompositeAlphaMode::Opaque or CompositeAlphaMode::Inherit. + pub alpha_modes: Vec, +} + +impl Default for SurfaceCapabilities { + fn default() -> Self { + Self { + formats: Vec::new(), + present_modes: Vec::new(), + alpha_modes: vec![CompositeAlphaMode::Opaque], + } + } +} /// Configures a [`Surface`] for presentation. /// @@ -5024,8 +5107,7 @@ bitflags::bitflags! 
{ } } -#[cfg(feature = "bitflags_serde_shim")] -bitflags_serde_shim::impl_serde_for_bitflags!(PipelineStatisticsTypes); +impl_bitflags!(PipelineStatisticsTypes); /// Argument buffer layout for draw_indirect commands. #[repr(C)] diff --git a/wgpu/Cargo.toml b/wgpu/Cargo.toml index 2017ada192..36272c0fda 100644 --- a/wgpu/Cargo.toml +++ b/wgpu/Cargo.toml @@ -1,13 +1,13 @@ [package] name = "wgpu" -version = "0.14.0" -authors = ["wgpu developers"] -edition = "2021" +version.workspace = true +authors.workspace = true +edition.workspace = true description = "Rusty WebGPU API wrapper" -homepage = "https://wgpu.rs/" -repository = "https://github.com/gfx-rs/wgpu/tree/v0.13" -keywords = ["graphics"] -license = "MIT OR Apache-2.0" +homepage.workspace = true +repository.workspace = true +keywords.workspace = true +license.workspace = true exclude = [ "etc/**/*", "examples/**/*.png", # Image comparison test @@ -76,7 +76,7 @@ name = "water" test = true [features] -default = ["wgsl"] +default = ["wgsl", "expose-ids"] spirv = ["naga/spv-in"] glsl = ["naga/glsl-in"] wgsl = ["wgc?/wgsl"] @@ -86,82 +86,67 @@ angle = ["wgc/angle"] webgl = ["wgc"] emscripten = ["webgl"] vulkan-portability = ["wgc/vulkan-portability"] +expose-ids = [] [target.'cfg(not(target_arch = "wasm32"))'.dependencies.wgc] -package = "wgpu-core" -path = "../wgpu-core" -version = "0.14" +workspace = true features = ["raw-window-handle"] [target.'cfg(target_arch = "wasm32")'.dependencies.wgc] -package = "wgpu-core" -path = "../wgpu-core" -version = "0.14" +workspace = true features = ["raw-window-handle"] optional = true [dependencies.wgt] -package = "wgpu-types" -path = "../wgpu-types" -version = "0.14" +workspace = true [target.'cfg(any(not(target_arch = "wasm32"), target_os = "emscripten"))'.dependencies.hal] -package = "wgpu-hal" -path = "../wgpu-hal" -version = "0.14" +workspace = true [dependencies] -arrayvec = "0.7" -log = "0.4" -# parking_lot 0.12 switches from `winapi` to `windows`; permit either 
-parking_lot = ">=0.11,<0.13" -raw-window-handle = "0.5" -serde = { version = "1", features = ["derive"], optional = true } -smallvec = "1" -static_assertions = "1.1.0" +arrayvec.workspace = true +log.workspace = true +parking_lot.workspace = true +raw-window-handle.workspace = true +serde = { workspace = true, features = ["derive"], optional = true } +smallvec.workspace = true +static_assertions.workspace = true [dev-dependencies] -bitflags = "1" -bytemuck = { version = "1.4", features = ["derive"] } -glam = "0.21.3" -ddsfile = "0.5" -futures-intrusive = "0.4" -log = "0.4" -# Opt out of noise's "default-features" to avoid "image" feature as a dependency count optimization. -# This will not be required in the next release since it has been removed from the default feature in https://github.com/Razaekel/noise-rs/commit/1af9e1522236b2c584fb9a02150c9c67a5e6bb04#diff-2e9d962a08321605940b5a657135052fbcef87b5e360662bb527c96d9a615542 -noise = { version = "0.7", default-features = false } -obj = "0.10" -png = "0.17" -nanorand = { version = "0.7", default-features = false, features = ["wyrand"] } -winit = "0.27.1" # for "halmark" example +bitflags.workspace = true +cfg-if.workspace = true +bytemuck = { workspace = true, features = ["derive"] } +glam.workspace = true +ddsfile.workspace = true +futures-intrusive.workspace = true +env_logger.workspace = true +log.workspace = true +noise = { workspace = true } +obj.workspace = true +pollster.workspace = true +png.workspace = true +nanorand = { workspace = true, features = ["wyrand"] } +winit.workspace = true # for "halmark" example # for "halmark" example [target.'cfg(not(target_arch = "wasm32"))'.dev-dependencies] -async-executor = "1.0" -pollster = "0.2" -env_logger = "0.9" +async-executor.workspace = true [dependencies.naga] -git = "https://github.com/gfx-rs/naga" -rev = "c52d9102" -version = "0.10" +workspace = true features = ["clone"] optional = true # used to test all the example shaders 
[dev-dependencies.naga] -git = "https://github.com/gfx-rs/naga" -rev = "c52d9102" -version = "0.10" +workspace = true features = ["wgsl-in"] [target.'cfg(target_arch = "wasm32")'.dependencies.naga] -git = "https://github.com/gfx-rs/naga" -rev = "c52d9102" -version = "0.10" +workspace = true features = ["wgsl-out"] [target.'cfg(target_arch = "wasm32")'.dependencies] -web-sys = { version = "0.3.60", features = [ +web-sys = { workspace = true, features = [ "Document", "Navigator", "Node", @@ -290,17 +275,16 @@ web-sys = { version = "0.3.60", features = [ "WorkerGlobalScope", "WorkerNavigator" ] } -wasm-bindgen = "0.2.83" -js-sys = "0.3.60" -wasm-bindgen-futures = "0.4.33" -# parking_lot 0.12 switches from `winapi` to `windows`; permit either -parking_lot = ">=0.11,<0.13" +wasm-bindgen.workspace = true +js-sys.workspace = true +wasm-bindgen-futures.workspace = true +parking_lot.workspace = true [target.'cfg(target_arch = "wasm32")'.dev-dependencies] -console_error_panic_hook = "0.1.7" -console_log = "0.2" +console_error_panic_hook.workspace = true +console_log.workspace = true # We need these features in the framework examples -web-sys = { version = "0.3.60", features = [ +web-sys = { workspace = true, features = [ "Location", "Blob", "RequestInit", diff --git a/wgpu/examples/README.md b/wgpu/examples/README.md index efbf6fa776..acf0da0c70 100644 --- a/wgpu/examples/README.md +++ b/wgpu/examples/README.md @@ -1,7 +1,7 @@ ## Structure For the simplest examples without using any helping code (see `framework.rs` here), check out: - - `hello ` for printing adapter information + - `hello` for printing adapter information - `hello-triangle` for graphics and presentation - `hello-compute` for pure computing diff --git a/wgpu/examples/cube/main.rs b/wgpu/examples/cube/main.rs index dadf81b0ac..5f0ffb9dcf 100644 --- a/wgpu/examples/cube/main.rs +++ b/wgpu/examples/cube/main.rs @@ -415,7 +415,7 @@ fn cube() { optional_features: wgpu::Features::default(), 
base_test_parameters: framework::test_common::TestParameters::default(), tolerance: 1, - max_outliers: 500, // Bounded by rpi4 + max_outliers: 1225, // Bounded by swiftshader }); } @@ -428,6 +428,6 @@ fn cube_lines() { optional_features: wgpu::Features::POLYGON_MODE_LINE, base_test_parameters: framework::test_common::TestParameters::default(), tolerance: 2, - max_outliers: 600, // Bounded by rpi4 on GL + max_outliers: 1250, // Bounded by swiftshader }); } diff --git a/wgpu/examples/framework.rs b/wgpu/examples/framework.rs index 5474ff6d5f..122c88170d 100644 --- a/wgpu/examples/framework.rs +++ b/wgpu/examples/framework.rs @@ -163,9 +163,9 @@ async fn setup(title: &str) -> Setup { let (size, surface) = unsafe { let size = window.inner_size(); - #[cfg(not(target_arch = "wasm32"))] + #[cfg(any(not(target_arch = "wasm32"), target_os = "emscripten"))] let surface = instance.create_surface(&window); - #[cfg(target_arch = "wasm32")] + #[cfg(all(target_arch = "wasm32", not(target_os = "emscripten")))] let surface = { if let Some(offscreen_canvas_setup) = &offscreen_canvas_setup { log::info!("Creating surface from OffscreenCanvas"); @@ -267,7 +267,9 @@ fn start( }: Setup, ) { let spawner = Spawner::new(); - let mut config = surface.get_default_config(&adapter, size.width, size.height); + let mut config = surface + .get_default_config(&adapter, size.width, size.height) + .expect("Surface isn't supported by the adapter."); surface.configure(&device, &config); log::info!("Initializing the example..."); diff --git a/wgpu/examples/hello-triangle/main.rs b/wgpu/examples/hello-triangle/main.rs index c61ce36882..4c19babf3c 100644 --- a/wgpu/examples/hello-triangle/main.rs +++ b/wgpu/examples/hello-triangle/main.rs @@ -46,7 +46,8 @@ async fn run(event_loop: EventLoop<()>, window: Window) { push_constant_ranges: &[], }); - let swapchain_format = surface.get_supported_formats(&adapter)[0]; + let swapchain_capabilities = surface.get_capabilities(&adapter); + let swapchain_format = 
swapchain_capabilities.formats[0]; let render_pipeline = device.create_render_pipeline(&wgpu::RenderPipelineDescriptor { label: None, @@ -73,7 +74,7 @@ async fn run(event_loop: EventLoop<()>, window: Window) { width: size.width, height: size.height, present_mode: wgpu::PresentMode::Fifo, - alpha_mode: surface.get_supported_alpha_modes(&adapter)[0], + alpha_mode: swapchain_capabilities.alpha_modes[0], }; surface.configure(&device, &config); diff --git a/wgpu/examples/hello-windows/main.rs b/wgpu/examples/hello-windows/main.rs index ade351fee9..0dcc87daac 100644 --- a/wgpu/examples/hello-windows/main.rs +++ b/wgpu/examples/hello-windows/main.rs @@ -31,13 +31,14 @@ impl ViewportDesc { fn build(self, adapter: &wgpu::Adapter, device: &wgpu::Device) -> Viewport { let size = self.window.inner_size(); + let caps = self.surface.get_capabilities(adapter); let config = wgpu::SurfaceConfiguration { usage: wgpu::TextureUsages::RENDER_ATTACHMENT, - format: self.surface.get_supported_formats(adapter)[0], + format: caps.formats[0], width: size.width, height: size.height, present_mode: wgpu::PresentMode::Fifo, - alpha_mode: self.surface.get_supported_alpha_modes(adapter)[0], + alpha_mode: caps.alpha_modes[0], }; self.surface.configure(device, &config); diff --git a/wgpu/examples/mipmap/main.rs b/wgpu/examples/mipmap/main.rs index 70ecbd91a0..5e62e35c06 100644 --- a/wgpu/examples/mipmap/main.rs +++ b/wgpu/examples/mipmap/main.rs @@ -400,9 +400,9 @@ impl framework::Example for Example { .slice(pipeline_statistics_offset()..) 
.get_mapped_range(); // Convert the raw data into a useful structure - let timestamp_data: &TimestampQueries = bytemuck::from_bytes(&*timestamp_view); + let timestamp_data: &TimestampQueries = bytemuck::from_bytes(×tamp_view); let pipeline_stats_data: &PipelineStatisticsQueries = - bytemuck::from_bytes(&*pipeline_stats_view); + bytemuck::from_bytes(&pipeline_stats_view); // Iterate over the data for (idx, (timestamp, pipeline)) in timestamp_data .iter() diff --git a/wgpu/examples/msaa-line/main.rs b/wgpu/examples/msaa-line/main.rs index 27d1e80798..b39c144877 100644 --- a/wgpu/examples/msaa-line/main.rs +++ b/wgpu/examples/msaa-line/main.rs @@ -32,6 +32,7 @@ struct Example { sample_count: u32, rebuild_bundle: bool, config: wgpu::SurfaceConfiguration, + max_sample_count: u32, } impl Example { @@ -117,6 +118,10 @@ impl Example { } impl framework::Example for Example { + fn optional_features() -> wgt::Features { + wgt::Features::TEXTURE_ADAPTER_SPECIFIC_FORMAT_FEATURES + } + fn init( config: &wgpu::SurfaceConfiguration, _adapter: &wgpu::Adapter, @@ -124,7 +129,22 @@ impl framework::Example for Example { _queue: &wgpu::Queue, ) -> Self { log::info!("Press left/right arrow keys to change sample_count."); - let sample_count = 4; + + let sample_flags = _adapter.get_texture_format_features(config.format).flags; + + let max_sample_count = { + if sample_flags.contains(wgpu::TextureFormatFeatureFlags::MULTISAMPLE_X8) { + 8 + } else if sample_flags.contains(wgpu::TextureFormatFeatureFlags::MULTISAMPLE_X4) { + 4 + } else if sample_flags.contains(wgpu::TextureFormatFeatureFlags::MULTISAMPLE_X2) { + 2 + } else { + 1 + } + }; + + let sample_count = max_sample_count; let shader = device.create_shader_module(wgpu::ShaderModuleDescriptor { label: None, @@ -181,6 +201,7 @@ impl framework::Example for Example { vertex_buffer, vertex_count, sample_count, + max_sample_count, rebuild_bundle: false, config: config.clone(), } @@ -195,14 +216,14 @@ impl framework::Example for Example { // 
TODO: Switch back to full scans of possible options when we expose // supported sample counts to the user. Some(winit::event::VirtualKeyCode::Left) => { - if self.sample_count == 4 { + if self.sample_count == self.max_sample_count { self.sample_count = 1; self.rebuild_bundle = true; } } Some(winit::event::VirtualKeyCode::Right) => { if self.sample_count == 1 { - self.sample_count = 4; + self.sample_count = self.max_sample_count; self.rebuild_bundle = true; } } @@ -295,7 +316,8 @@ fn msaa_line() { image_path: "/examples/msaa-line/screenshot.png", width: 1024, height: 768, - optional_features: wgpu::Features::default(), + optional_features: wgpu::Features::default() + | wgt::Features::TEXTURE_ADAPTER_SPECIFIC_FORMAT_FEATURES, base_test_parameters: framework::test_common::TestParameters::default(), tolerance: 64, max_outliers: 1 << 16, // MSAA is comically different between vendors, 32k is a decent limit diff --git a/wgpu/examples/shadow/main.rs b/wgpu/examples/shadow/main.rs index 72106ebfd2..d2eba66c30 100644 --- a/wgpu/examples/shadow/main.rs +++ b/wgpu/examples/shadow/main.rs @@ -855,6 +855,6 @@ fn shadow() { // llvmpipe versions in CI are flaky: https://github.com/gfx-rs/wgpu/issues/2594 .specific_failure(Some(wgpu::Backends::VULKAN), None, Some("llvmpipe"), true), tolerance: 2, - max_outliers: 500, // bounded by rpi4 + max_outliers: 1075, // bounded by swiftshader }); } diff --git a/wgpu/examples/skybox/main.rs b/wgpu/examples/skybox/main.rs index 3b4a152c1d..d5b08634af 100644 --- a/wgpu/examples/skybox/main.rs +++ b/wgpu/examples/skybox/main.rs @@ -472,10 +472,9 @@ fn skybox() { width: 1024, height: 768, optional_features: wgpu::Features::default(), - base_test_parameters: framework::test_common::TestParameters::default() - .backend_failure(wgpu::Backends::GL), + base_test_parameters: framework::test_common::TestParameters::default(), tolerance: 3, - max_outliers: 3, + max_outliers: 207, // bounded by swiftshader }); } @@ -488,7 +487,7 @@ fn 
skybox_bc1() { optional_features: wgpu::Features::TEXTURE_COMPRESSION_BC, base_test_parameters: framework::test_common::TestParameters::default(), // https://bugs.chromium.org/p/angleproject/issues/detail?id=7056 tolerance: 5, - max_outliers: 105, // Bounded by llvmpipe + max_outliers: 191, // Bounded by swiftshader }); } @@ -501,7 +500,7 @@ fn skybox_etc2() { optional_features: wgpu::Features::TEXTURE_COMPRESSION_ETC2, base_test_parameters: framework::test_common::TestParameters::default(), // https://bugs.chromium.org/p/angleproject/issues/detail?id=7056 tolerance: 5, - max_outliers: 105, // Bounded by llvmpipe + max_outliers: 248, // Bounded by swiftshader }); } diff --git a/wgpu/examples/water/main.rs b/wgpu/examples/water/main.rs index d246e9a678..aa785c6ec0 100644 --- a/wgpu/examples/water/main.rs +++ b/wgpu/examples/water/main.rs @@ -826,9 +826,8 @@ fn water() { height: 768, optional_features: wgpu::Features::default(), base_test_parameters: framework::test_common::TestParameters::default() - .downlevel_flags(wgpu::DownlevelFlags::READ_ONLY_DEPTH_STENCIL) - .specific_failure(Some(wgpu::Backends::DX12), None, Some("Basic"), false), // WARP has a bug https://github.com/gfx-rs/wgpu/issues/1730 + .downlevel_flags(wgpu::DownlevelFlags::READ_ONLY_DEPTH_STENCIL), tolerance: 5, - max_outliers: 470, // bounded by DX12, then AMD Radeon Polaris12 on vk linux + max_outliers: 1693, // bounded by swiftshader }); } diff --git a/wgpu/src/backend/direct.rs b/wgpu/src/backend/direct.rs index 3d33e2c768..7eb0d4eba1 100644 --- a/wgpu/src/backend/direct.rs +++ b/wgpu/src/backend/direct.rs @@ -4,7 +4,7 @@ use crate::{ DownlevelCapabilities, Features, Label, Limits, LoadOp, MapMode, Operations, PipelineLayoutDescriptor, RenderBundleEncoderDescriptor, RenderPipelineDescriptor, SamplerDescriptor, ShaderModuleDescriptor, ShaderModuleDescriptorSpirV, ShaderSource, - SurfaceStatus, TextureDescriptor, TextureFormat, TextureViewDescriptor, + SurfaceStatus, TextureDescriptor, 
TextureViewDescriptor, }; use arrayvec::ArrayVec; @@ -19,7 +19,6 @@ use std::{ slice, sync::Arc, }; -use wgt::{CompositeAlphaMode, PresentMode}; const LABEL: &str = "label"; @@ -40,25 +39,26 @@ impl fmt::Debug for Context { impl Context { #[cfg(any(not(target_arch = "wasm32"), feature = "emscripten"))] pub unsafe fn from_hal_instance(hal_instance: A::Instance) -> Self { - Self(wgc::hub::Global::from_hal_instance::( - "wgpu", - wgc::hub::IdentityManagerFactory, - hal_instance, - )) + Self(unsafe { + wgc::hub::Global::from_hal_instance::( + "wgpu", + wgc::hub::IdentityManagerFactory, + hal_instance, + ) + }) } /// # Safety /// /// - The raw instance handle returned must not be manually destroyed. pub unsafe fn instance_as_hal(&self) -> Option<&A::Instance> { - self.0.instance_as_hal::() + unsafe { self.0.instance_as_hal::() } } pub unsafe fn from_core_instance(core_instance: wgc::instance::Instance) -> Self { - Self(wgc::hub::Global::from_instance( - wgc::hub::IdentityManagerFactory, - core_instance, - )) + Self(unsafe { + wgc::hub::Global::from_instance(wgc::hub::IdentityManagerFactory, core_instance) + }) } pub(crate) fn global(&self) -> &wgc::hub::Global { @@ -76,7 +76,7 @@ impl Context { &self, hal_adapter: hal::ExposedAdapter, ) -> wgc::id::AdapterId { - self.0.create_adapter_from_hal(hal_adapter, ()) + unsafe { self.0.create_adapter_from_hal(hal_adapter, ()) } } pub unsafe fn adapter_as_hal) -> R, R>( @@ -84,8 +84,10 @@ impl Context { adapter: wgc::id::AdapterId, hal_adapter_callback: F, ) -> R { - self.0 - .adapter_as_hal::(adapter, hal_adapter_callback) + unsafe { + self.0 + .adapter_as_hal::(adapter, hal_adapter_callback) + } } #[cfg(any(not(target_arch = "wasm32"), feature = "emscripten"))] @@ -95,24 +97,31 @@ impl Context { hal_device: hal::OpenDevice, desc: &crate::DeviceDescriptor, trace_dir: Option<&std::path::Path>, - ) -> Result<(Device, wgc::id::QueueId), crate::RequestDeviceError> { - let global = &self.0; - let (device_id, error) = 
global.create_device_from_hal( - *adapter, - hal_device, - &desc.map_label(|l| l.map(Borrowed)), - trace_dir, - (), - ); + ) -> Result<(Device, Queue), crate::RequestDeviceError> { + let global = &self.0; + let (device_id, error) = unsafe { + global.create_device_from_hal( + *adapter, + hal_device, + &desc.map_label(|l| l.map(Borrowed)), + trace_dir, + (), + ) + }; if let Some(err) = error { self.handle_error_fatal(err, "Adapter::create_device_from_hal"); } + let error_sink = Arc::new(Mutex::new(ErrorSinkRaw::new())); let device = Device { id: device_id, - error_sink: Arc::new(Mutex::new(ErrorSinkRaw::new())), + error_sink: error_sink.clone(), features: desc.features, }; - Ok((device, device_id)) + let queue = Queue { + id: device_id, + error_sink, + }; + Ok((device, queue)) } #[cfg(any(not(target_arch = "wasm32"), feature = "emscripten"))] @@ -123,12 +132,14 @@ impl Context { desc: &TextureDescriptor, ) -> Texture { let global = &self.0; - let (id, error) = global.create_texture_from_hal::( - hal_texture, - device.id, - &desc.map_label(|l| l.map(Borrowed)), - (), - ); + let (id, error) = unsafe { + global.create_texture_from_hal::( + hal_texture, + device.id, + &desc.map_label(|l| l.map(Borrowed)), + (), + ) + }; if let Some(cause) = error { self.handle_error( &device.error_sink, @@ -150,8 +161,26 @@ impl Context { device: &Device, hal_device_callback: F, ) -> R { - self.0 - .device_as_hal::(device.id, hal_device_callback) + unsafe { + self.0 + .device_as_hal::(device.id, hal_device_callback) + } + } + + #[cfg(any(not(target_arch = "wasm32"), feature = "emscripten"))] + pub unsafe fn surface_as_hal_mut< + A: wgc::hub::HalApi, + F: FnOnce(Option<&mut A::Surface>) -> R, + R, + >( + &self, + surface: &Surface, + hal_surface_callback: F, + ) -> R { + unsafe { + self.0 + .surface_as_hal_mut::(surface.id, hal_surface_callback) + } } #[cfg(any(not(target_arch = "wasm32"), feature = "emscripten"))] @@ -160,8 +189,10 @@ impl Context { texture: &Texture, 
hal_texture_callback: F, ) { - self.0 - .texture_as_hal::(texture.id, hal_texture_callback) + unsafe { + self.0 + .texture_as_hal::(texture.id, hal_texture_callback) + } } #[cfg(any(not(target_arch = "wasm32"), feature = "emscripten"))] @@ -213,7 +244,7 @@ impl Context { self: &Arc, visual: *mut std::ffi::c_void, ) -> crate::Surface { - let id = self.0.instance_create_surface_from_visual(visual, ()); + let id = unsafe { self.0.instance_create_surface_from_visual(visual, ()) }; crate::Surface { context: Arc::clone(self), id: Surface { @@ -794,6 +825,18 @@ pub struct Texture { error_sink: ErrorSink, } +#[derive(Debug)] +pub struct Queue { + id: wgc::id::QueueId, + error_sink: ErrorSink, +} + +impl Queue { + pub(crate) fn backend(&self) -> wgt::Backend { + self.id.backend() + } +} + #[derive(Debug)] pub(crate) struct CommandEncoder { id: wgc::id::CommandEncoderId, @@ -801,10 +844,65 @@ pub(crate) struct CommandEncoder { open: bool, } +impl crate::GlobalId for T { + #[allow(clippy::useless_conversion)] // because not(id32) + fn global_id(&self) -> u64 { + T::into_raw(*self).get().into() + } +} + +impl crate::GlobalId for Surface { + #[allow(clippy::useless_conversion)] // because not(id32) + fn global_id(&self) -> u64 { + use wgc::id::TypedId; + self.id.into_raw().get().into() + } +} + +impl crate::GlobalId for Device { + #[allow(clippy::useless_conversion)] // because not(id32) + fn global_id(&self) -> u64 { + use wgc::id::TypedId; + self.id.into_raw().get().into() + } +} + +impl crate::GlobalId for Buffer { + #[allow(clippy::useless_conversion)] // because not(id32) + fn global_id(&self) -> u64 { + use wgc::id::TypedId; + self.id.into_raw().get().into() + } +} + +impl crate::GlobalId for Texture { + #[allow(clippy::useless_conversion)] // because not(id32) + fn global_id(&self) -> u64 { + use wgc::id::TypedId; + self.id.into_raw().get().into() + } +} + +impl crate::GlobalId for CommandEncoder { + #[allow(clippy::useless_conversion)] // because not(id32) + fn 
global_id(&self) -> u64 { + use wgc::id::TypedId; + self.id.into_raw().get().into() + } +} + +impl crate::GlobalId for Queue { + #[allow(clippy::useless_conversion)] // because not(id32) + fn global_id(&self) -> u64 { + use wgc::id::TypedId; + self.id.into_raw().get().into() + } +} + impl crate::Context for Context { type AdapterId = wgc::id::AdapterId; type DeviceId = Device; - type QueueId = wgc::id::QueueId; + type QueueId = Queue; type ShaderModuleId = wgc::id::ShaderModuleId; type BindGroupLayoutId = wgc::id::BindGroupLayoutId; type BindGroupId = wgc::id::BindGroupId; @@ -894,12 +992,17 @@ impl crate::Context for Context { log::error!("Error in Adapter::request_device: {}", err); return ready(Err(crate::RequestDeviceError)); } + let error_sink = Arc::new(Mutex::new(ErrorSinkRaw::new())); let device = Device { id: device_id, - error_sink: Arc::new(Mutex::new(ErrorSinkRaw::new())), + error_sink: error_sink.clone(), features: desc.features, }; - ready(Ok((device, device_id))) + let queue = Queue { + id: device_id, + error_sink, + }; + ready(Ok((device, queue))) } fn adapter_is_surface_supported( @@ -960,47 +1063,18 @@ impl crate::Context for Context { } } - fn surface_get_supported_formats( + fn surface_get_capabilities( &self, surface: &Self::SurfaceId, adapter: &Self::AdapterId, - ) -> Vec { + ) -> wgt::SurfaceCapabilities { let global = &self.0; - match wgc::gfx_select!(adapter => global.surface_get_supported_formats(surface.id, *adapter)) - { - Ok(formats) => formats, - Err(wgc::instance::GetSurfaceSupportError::Unsupported) => vec![], - Err(err) => self.handle_error_fatal(err, "Surface::get_supported_formats"), - } - } - - fn surface_get_supported_present_modes( - &self, - surface: &Self::SurfaceId, - adapter: &Self::AdapterId, - ) -> Vec { - let global = &self.0; - match wgc::gfx_select!(adapter => global.surface_get_supported_present_modes(surface.id, *adapter)) - { - Ok(modes) => modes, - Err(wgc::instance::GetSurfaceSupportError::Unsupported) => vec![], 
- Err(err) => self.handle_error_fatal(err, "Surface::get_supported_present_modes"), - } - } - - fn surface_get_supported_alpha_modes( - &self, - surface: &Self::SurfaceId, - adapter: &Self::AdapterId, - ) -> Vec { - let global = &self.0; - match wgc::gfx_select!(adapter => global.surface_get_supported_alpha_modes(surface.id, *adapter)) - { - Ok(modes) => modes, + match wgc::gfx_select!(adapter => global.surface_get_capabilities(surface.id, *adapter)) { + Ok(caps) => caps, Err(wgc::instance::GetSurfaceSupportError::Unsupported) => { - vec![CompositeAlphaMode::Opaque] + wgt::SurfaceCapabilities::default() } - Err(err) => self.handle_error_fatal(err, "Surface::get_supported_alpha_modes"), + Err(err) => self.handle_error_fatal(err, "Surface::get_supported_formats"), } } @@ -1174,7 +1248,7 @@ impl crate::Context for Context { label: desc.label.map(Borrowed), // Doesn't matter the value since spirv shaders aren't mutated to include // runtime checks - shader_bound_checks: wgt::ShaderBoundChecks::unchecked(), + shader_bound_checks: unsafe { wgt::ShaderBoundChecks::unchecked() }, }; let (id, error) = wgc::gfx_select!( device.id => global.device_create_shader_module_spirv(device.id, &descriptor, Borrowed(&desc.source), ()) @@ -1701,10 +1775,7 @@ impl crate::Context for Context { MapMode::Write => wgc::device::HostMap::Write, }, callback: wgc::resource::BufferMapCallback::from_rust(Box::new(|status| { - let res = match status { - wgc::resource::BufferMapAsyncStatus::Success => Ok(()), - _ => Err(crate::BufferAsyncError), - }; + let res = status.map_err(|_| crate::BufferAsyncError); callback(res); })), }; @@ -2216,7 +2287,7 @@ impl crate::Context for Context { ) { let global = &self.0; match wgc::gfx_select!( - *queue => global.queue_write_buffer(*queue, buffer.id, offset, data) + *queue => global.queue_write_buffer(queue.id, buffer.id, offset, data) ) { Ok(()) => (), Err(err) => self.handle_error_fatal(err, "Queue::write_buffer"), @@ -2232,7 +2303,7 @@ impl crate::Context 
for Context { ) { let global = &self.0; match wgc::gfx_select!( - *queue => global.queue_validate_write_buffer(*queue, buffer.id, offset, size.get()) + *queue => global.queue_validate_write_buffer(queue.id, buffer.id, offset, size.get()) ) { Ok(()) => (), Err(err) => self.handle_error_fatal(err, "Queue::write_buffer_with"), @@ -2246,7 +2317,7 @@ impl crate::Context for Context { ) -> QueueWriteBuffer { let global = &self.0; match wgc::gfx_select!( - *queue => global.queue_create_staging_buffer(*queue, size, ()) + *queue => global.queue_create_staging_buffer(queue.id, size, ()) ) { Ok((buffer_id, ptr)) => QueueWriteBuffer { buffer_id, @@ -2268,10 +2339,12 @@ impl crate::Context for Context { ) { let global = &self.0; match wgc::gfx_select!( - *queue => global.queue_write_staging_buffer(*queue, buffer.id, offset, staging_buffer.buffer_id) + *queue => global.queue_write_staging_buffer(queue.id, buffer.id, offset, staging_buffer.buffer_id) ) { Ok(()) => (), - Err(err) => self.handle_error_fatal(err, "Queue::write_buffer_with"), + Err(err) => { + self.handle_error_nolabel(&queue.error_sink, err, "Queue::write_buffer_with"); + } } } @@ -2285,14 +2358,14 @@ impl crate::Context for Context { ) { let global = &self.0; match wgc::gfx_select!(*queue => global.queue_write_texture( - *queue, + queue.id, &map_texture_copy_view(texture), data, &data_layout, &size )) { Ok(()) => (), - Err(err) => self.handle_error_fatal(err, "Queue::write_texture"), + Err(err) => self.handle_error_nolabel(&queue.error_sink, err, "Queue::write_texture"), } } @@ -2304,7 +2377,7 @@ impl crate::Context for Context { let temp_command_buffers = command_buffers.collect::>(); let global = &self.0; - match wgc::gfx_select!(*queue => global.queue_submit(*queue, &temp_command_buffers)) { + match wgc::gfx_select!(*queue => global.queue_submit(queue.id, &temp_command_buffers)) { Ok(index) => index, Err(err) => self.handle_error_fatal(err, "Queue::submit"), } @@ -2313,7 +2386,7 @@ impl crate::Context for 
Context { fn queue_get_timestamp_period(&self, queue: &Self::QueueId) -> f32 { let global = &self.0; let res = wgc::gfx_select!(queue => global.queue_get_timestamp_period( - *queue + queue.id )); match res { Ok(v) => v, @@ -2331,7 +2404,7 @@ impl crate::Context for Context { let closure = wgc::device::queue::SubmittedWorkDoneClosure::from_rust(callback); let global = &self.0; - let res = wgc::gfx_select!(queue => global.queue_on_submitted_work_done(*queue, closure)); + let res = wgc::gfx_select!(queue => global.queue_on_submitted_work_done(queue.id, closure)); if let Err(cause) = res { self.handle_error_fatal(cause, "Queue::on_submitted_work_done"); } diff --git a/wgpu/src/backend/web.rs b/wgpu/src/backend/web.rs index 01d454601e..6e4e6d4496 100644 --- a/wgpu/src/backend/web.rs +++ b/wgpu/src/backend/web.rs @@ -25,6 +25,39 @@ pub(crate) struct Sendable(T); unsafe impl Send for Sendable {} unsafe impl Sync for Sendable {} +#[derive(Clone, Debug)] +pub(crate) struct Identified(T, #[cfg(feature = "expose-ids")] u64); +unsafe impl Send for Identified {} +unsafe impl Sync for Identified {} + +impl crate::GlobalId for Identified { + #[cfg(not(feature = "expose-ids"))] + fn global_id(&self) -> u64 { + 0 + } + + #[cfg(feature = "expose-ids")] + fn global_id(&self) -> u64 { + self.1 + } +} + +#[cfg(feature = "expose-ids")] +static NEXT_ID: std::sync::atomic::AtomicU64 = std::sync::atomic::AtomicU64::new(0); + +#[cfg(not(feature = "expose-ids"))] +fn create_identified(value: T) -> Identified { + Identified(value) +} + +#[cfg(feature = "expose-ids")] +fn create_identified(value: T) -> Identified { + Identified( + value, + NEXT_ID.fetch_add(1, std::sync::atomic::Ordering::Relaxed), + ) +} + pub(crate) struct Context(web_sys::Gpu); unsafe impl Send for Context {} unsafe impl Sync for Context {} @@ -93,13 +126,13 @@ impl MakeSendFuture { unsafe impl Send for MakeSendFuture {} impl crate::ComputePassInner for ComputePass { - fn set_pipeline(&mut self, pipeline: &Sendable) { + fn 
set_pipeline(&mut self, pipeline: &Identified) { self.0.set_pipeline(&pipeline.0); } fn set_bind_group( &mut self, index: u32, - bind_group: &Sendable, + bind_group: &Identified, offsets: &[wgt::DynamicOffset], ) { self.0 @@ -137,20 +170,24 @@ impl crate::ComputePassInner for ComputePass { } fn dispatch_workgroups_indirect( &mut self, - indirect_buffer: &Sendable, + indirect_buffer: &Identified, indirect_offset: wgt::BufferAddress, ) { self.0 .dispatch_workgroups_indirect_with_f64(&indirect_buffer.0, indirect_offset as f64); } - fn write_timestamp(&mut self, _query_set: &Sendable, _query_index: u32) { + fn write_timestamp( + &mut self, + _query_set: &Identified, + _query_index: u32, + ) { panic!("WRITE_TIMESTAMP_INSIDE_PASSES feature must be enabled to call write_timestamp in a compute pass") } fn begin_pipeline_statistics_query( &mut self, - _query_set: &Sendable, + _query_set: &Identified, _query_index: u32, ) { // Not available in gecko yet @@ -162,13 +199,13 @@ impl crate::ComputePassInner for ComputePass { } impl crate::RenderInner for RenderPass { - fn set_pipeline(&mut self, pipeline: &Sendable) { + fn set_pipeline(&mut self, pipeline: &Identified) { self.0.set_pipeline(&pipeline.0); } fn set_bind_group( &mut self, index: u32, - bind_group: &Sendable, + bind_group: &Identified, offsets: &[wgt::DynamicOffset], ) { self.0 @@ -182,7 +219,7 @@ impl crate::RenderInner for RenderPass { } fn set_index_buffer( &mut self, - buffer: &Sendable, + buffer: &Identified, index_format: wgt::IndexFormat, offset: wgt::BufferAddress, size: Option, @@ -208,7 +245,7 @@ impl crate::RenderInner for RenderPass { fn set_vertex_buffer( &mut self, slot: u32, - buffer: &Sendable, + buffer: &Identified, offset: wgt::BufferAddress, size: Option, ) { @@ -251,7 +288,7 @@ impl crate::RenderInner for RenderPass { } fn draw_indirect( &mut self, - indirect_buffer: &Sendable, + indirect_buffer: &Identified, indirect_offset: wgt::BufferAddress, ) { self.0 @@ -259,7 +296,7 @@ impl 
crate::RenderInner for RenderPass { } fn draw_indexed_indirect( &mut self, - indirect_buffer: &Sendable, + indirect_buffer: &Identified, indirect_offset: wgt::BufferAddress, ) { self.0 @@ -267,7 +304,7 @@ impl crate::RenderInner for RenderPass { } fn multi_draw_indirect( &mut self, - _indirect_buffer: &Sendable, + _indirect_buffer: &Identified, _indirect_offset: wgt::BufferAddress, _count: u32, ) { @@ -275,7 +312,7 @@ impl crate::RenderInner for RenderPass { } fn multi_draw_indexed_indirect( &mut self, - _indirect_buffer: &Sendable, + _indirect_buffer: &Identified, _indirect_offset: wgt::BufferAddress, _count: u32, ) { @@ -283,9 +320,9 @@ impl crate::RenderInner for RenderPass { } fn multi_draw_indirect_count( &mut self, - _indirect_buffer: &Sendable, + _indirect_buffer: &Identified, _indirect_offset: wgt::BufferAddress, - _count_buffer: &Sendable, + _count_buffer: &Identified, _count_buffer_offset: wgt::BufferAddress, _max_count: u32, ) { @@ -295,9 +332,9 @@ impl crate::RenderInner for RenderPass { } fn multi_draw_indexed_indirect_count( &mut self, - _indirect_buffer: &Sendable, + _indirect_buffer: &Identified, _indirect_offset: wgt::BufferAddress, - _count_buffer: &Sendable, + _count_buffer: &Identified, _count_buffer_offset: wgt::BufferAddress, _max_count: u32, ) { @@ -306,13 +343,13 @@ impl crate::RenderInner for RenderPass { } impl crate::RenderInner for RenderBundleEncoder { - fn set_pipeline(&mut self, pipeline: &Sendable) { + fn set_pipeline(&mut self, pipeline: &Identified) { self.0.set_pipeline(&pipeline.0); } fn set_bind_group( &mut self, index: u32, - bind_group: &Sendable, + bind_group: &Identified, offsets: &[wgt::DynamicOffset], ) { self.0 @@ -326,7 +363,7 @@ impl crate::RenderInner for RenderBundleEncoder { } fn set_index_buffer( &mut self, - buffer: &Sendable, + buffer: &Identified, index_format: wgt::IndexFormat, offset: wgt::BufferAddress, size: Option, @@ -352,7 +389,7 @@ impl crate::RenderInner for RenderBundleEncoder { fn set_vertex_buffer( 
&mut self, slot: u32, - buffer: &Sendable, + buffer: &Identified, offset: wgt::BufferAddress, size: Option, ) { @@ -395,7 +432,7 @@ impl crate::RenderInner for RenderBundleEncoder { } fn draw_indirect( &mut self, - indirect_buffer: &Sendable, + indirect_buffer: &Identified, indirect_offset: wgt::BufferAddress, ) { self.0 @@ -403,7 +440,7 @@ impl crate::RenderInner for RenderBundleEncoder { } fn draw_indexed_indirect( &mut self, - indirect_buffer: &Sendable, + indirect_buffer: &Identified, indirect_offset: wgt::BufferAddress, ) { self.0 @@ -411,7 +448,7 @@ impl crate::RenderInner for RenderBundleEncoder { } fn multi_draw_indirect( &mut self, - _indirect_buffer: &Sendable, + _indirect_buffer: &Identified, _indirect_offset: wgt::BufferAddress, _count: u32, ) { @@ -419,7 +456,7 @@ impl crate::RenderInner for RenderBundleEncoder { } fn multi_draw_indexed_indirect( &mut self, - _indirect_buffer: &Sendable, + _indirect_buffer: &Identified, _indirect_offset: wgt::BufferAddress, _count: u32, ) { @@ -427,9 +464,9 @@ impl crate::RenderInner for RenderBundleEncoder { } fn multi_draw_indirect_count( &mut self, - _indirect_buffer: &Sendable, + _indirect_buffer: &Identified, _indirect_offset: wgt::BufferAddress, - _count_buffer: &Sendable, + _count_buffer: &Identified, _count_buffer_offset: wgt::BufferAddress, _max_count: u32, ) { @@ -439,9 +476,9 @@ impl crate::RenderInner for RenderBundleEncoder { } fn multi_draw_indexed_indirect_count( &mut self, - _indirect_buffer: &Sendable, + _indirect_buffer: &Identified, _indirect_offset: wgt::BufferAddress, - _count_buffer: &Sendable, + _count_buffer: &Identified, _count_buffer_offset: wgt::BufferAddress, _max_count: u32, ) { @@ -488,7 +525,7 @@ impl crate::RenderPassInner for RenderPass { // self.0.pop_debug_group(); } - fn execute_bundles<'a, I: Iterator>>( + fn execute_bundles<'a, I: Iterator>>( &mut self, render_bundles: I, ) { @@ -498,13 +535,17 @@ impl crate::RenderPassInner for RenderPass { self.0.execute_bundles(&mapped); } - fn 
write_timestamp(&mut self, _query_set: &Sendable, _query_index: u32) { + fn write_timestamp( + &mut self, + _query_set: &Identified, + _query_index: u32, + ) { panic!("WRITE_TIMESTAMP_INSIDE_PASSES feature must be enabled to call write_timestamp in a compute pass") } fn begin_pipeline_statistics_query( &mut self, - _query_set: &Sendable, + _query_set: &Identified, _query_index: u32, ) { // Not available in gecko yet @@ -903,22 +944,28 @@ fn map_map_mode(mode: crate::MapMode) -> u32 { type JsFutureResult = Result; -fn future_request_adapter(result: JsFutureResult) -> Option> { +fn future_request_adapter(result: JsFutureResult) -> Option> { match result.and_then(wasm_bindgen::JsCast::dyn_into) { - Ok(adapter) => Some(Sendable(adapter)), + Ok(adapter) => Some(create_identified(adapter)), Err(_) => None, } } fn future_request_device( result: JsFutureResult, -) -> Result<(Sendable, Sendable), crate::RequestDeviceError> -{ +) -> Result< + ( + Identified, + Identified, + ), + crate::RequestDeviceError, +> { result .map(|js_value| { let device_id = web_sys::GpuDevice::from(js_value); let queue_id = device_id.queue(); - (Sendable(device_id), Sendable(queue_id)) + + (create_identified(device_id), create_identified(queue_id)) }) .map_err(|_| crate::RequestDeviceError) } @@ -984,7 +1031,7 @@ impl Context { Ok(Some(ctx)) => ctx.into(), _ => panic!("expected to get context from canvas"), }; - Sendable(context.into()) + create_identified(context.into()) } pub fn instance_create_surface_from_offscreen_canvas( @@ -995,12 +1042,12 @@ impl Context { Ok(Some(ctx)) => ctx.into(), _ => panic!("expected to get context from canvas"), }; - Sendable(context.into()) + create_identified(context.into()) } pub fn queue_copy_external_image_to_texture( &self, - queue: &Sendable, + queue: &Identified, image: &web_sys::ImageBitmap, texture: crate::ImageCopyTexture, size: wgt::Extent3d, @@ -1037,27 +1084,27 @@ extern "C" { pub struct SubmissionIndex; impl crate::Context for Context { - type 
AdapterId = Sendable; - type DeviceId = Sendable; - type QueueId = Sendable; - type ShaderModuleId = Sendable; - type BindGroupLayoutId = Sendable; - type BindGroupId = Sendable; - type TextureViewId = Sendable; - type SamplerId = Sendable; - type BufferId = Sendable; - type TextureId = Sendable; - type QuerySetId = Sendable; - type PipelineLayoutId = Sendable; - type RenderPipelineId = Sendable; - type ComputePipelineId = Sendable; + type AdapterId = Identified; + type DeviceId = Identified; + type QueueId = Identified; + type ShaderModuleId = Identified; + type BindGroupLayoutId = Identified; + type BindGroupId = Identified; + type TextureViewId = Identified; + type SamplerId = Identified; + type BufferId = Identified; + type TextureId = Identified; + type QuerySetId = Identified; + type PipelineLayoutId = Identified; + type RenderPipelineId = Identified; + type ComputePipelineId = Identified; type CommandEncoderId = Sendable; type ComputePassId = ComputePass; type RenderPassId = RenderPass; type CommandBufferId = Sendable; type RenderBundleEncoderId = RenderBundleEncoder; - type RenderBundleId = Sendable; - type SurfaceId = Sendable; + type RenderBundleId = Identified; + type SurfaceId = Identified; type SurfaceOutputDetail = SurfaceOutputDetail; type SubmissionIndex = SubmissionIndex; @@ -1275,33 +1322,22 @@ impl crate::Context for Context { format.describe().guaranteed_format_features } - fn surface_get_supported_formats( + fn surface_get_capabilities( &self, _surface: &Self::SurfaceId, _adapter: &Self::AdapterId, - ) -> Vec { - // https://gpuweb.github.io/gpuweb/#supported-context-formats - vec![ - wgt::TextureFormat::Bgra8Unorm, - wgt::TextureFormat::Rgba8Unorm, - wgt::TextureFormat::Rgba16Float, - ] - } - - fn surface_get_supported_present_modes( - &self, - _surface: &Self::SurfaceId, - _adapter: &Self::AdapterId, - ) -> Vec { - vec![wgt::PresentMode::Fifo] - } - - fn surface_get_supported_alpha_modes( - &self, - _surface: &Self::SurfaceId, - _adapter: 
&Self::AdapterId, - ) -> Vec { - vec![wgt::CompositeAlphaMode::Opaque] + ) -> wgt::SurfaceCapabilities { + wgt::SurfaceCapabilities { + // https://gpuweb.github.io/gpuweb/#supported-context-formats + formats: vec![ + wgt::TextureFormat::Bgra8Unorm, + wgt::TextureFormat::Rgba8Unorm, + wgt::TextureFormat::Rgba16Float, + ], + // Doesn't really have meaning on the web. + present_modes: vec![wgt::PresentMode::Fifo], + alpha_modes: vec![wgt::CompositeAlphaMode::Opaque], + } } fn surface_configure( @@ -1338,7 +1374,7 @@ impl crate::Context for Context { Self::SurfaceOutputDetail, ) { ( - Some(Sendable(surface.0.get_current_texture())), + Some(create_identified(surface.0.get_current_texture())), wgt::SurfaceStatus::Good, (), ) @@ -1462,7 +1498,7 @@ impl crate::Context for Context { if let Some(label) = desc.label { descriptor.label(label); } - Sendable(device.0.create_shader_module(&descriptor)) + create_identified(device.0.create_shader_module(&descriptor)) } fn device_create_bind_group_layout( @@ -1560,7 +1596,7 @@ impl crate::Context for Context { if let Some(label) = desc.label { mapped_desc.label(label); } - Sendable(device.0.create_bind_group_layout(&mapped_desc)) + create_identified(device.0.create_bind_group_layout(&mapped_desc)) } unsafe fn device_create_shader_module_spirv( @@ -1618,7 +1654,7 @@ impl crate::Context for Context { if let Some(label) = desc.label { mapped_desc.label(label); } - Sendable(device.0.create_bind_group(&mapped_desc)) + create_identified(device.0.create_bind_group(&mapped_desc)) } fn device_create_pipeline_layout( @@ -1635,7 +1671,7 @@ impl crate::Context for Context { if let Some(label) = desc.label { mapped_desc.label(label); } - Sendable(device.0.create_pipeline_layout(&mapped_desc)) + create_identified(device.0.create_pipeline_layout(&mapped_desc)) } fn device_create_render_pipeline( @@ -1726,7 +1762,7 @@ impl crate::Context for Context { let mapped_primitive = map_primitive_state(&desc.primitive); 
mapped_desc.primitive(&mapped_primitive); - Sendable(device.0.create_render_pipeline(&mapped_desc)) + create_identified(device.0.create_render_pipeline(&mapped_desc)) } fn device_create_compute_pipeline( @@ -1747,7 +1783,7 @@ impl crate::Context for Context { if let Some(label) = desc.label { mapped_desc.label(label); } - Sendable(device.0.create_compute_pipeline(&mapped_desc)) + create_identified(device.0.create_compute_pipeline(&mapped_desc)) } fn device_create_buffer( @@ -1761,7 +1797,7 @@ impl crate::Context for Context { if let Some(label) = desc.label { mapped_desc.label(label); } - Sendable(device.0.create_buffer(&mapped_desc)) + create_identified(device.0.create_buffer(&mapped_desc)) } fn device_create_texture( @@ -1780,7 +1816,7 @@ impl crate::Context for Context { mapped_desc.dimension(map_texture_dimension(desc.dimension)); mapped_desc.mip_level_count(desc.mip_level_count); mapped_desc.sample_count(desc.sample_count); - Sendable(device.0.create_texture(&mapped_desc)) + create_identified(device.0.create_texture(&mapped_desc)) } fn device_create_sampler( @@ -1805,7 +1841,7 @@ impl crate::Context for Context { if let Some(label) = desc.label { mapped_desc.label(label); } - Sendable(device.0.create_sampler_with_descriptor(&mapped_desc)) + create_identified(device.0.create_sampler_with_descriptor(&mapped_desc)) } fn device_create_query_set( @@ -1822,7 +1858,7 @@ impl crate::Context for Context { if let Some(label) = desc.label { mapped_desc.label(label); } - Sendable(device.0.create_query_set(&mapped_desc)) + create_identified(device.0.create_query_set(&mapped_desc)) } fn device_create_command_encoder( @@ -1968,7 +2004,7 @@ impl crate::Context for Context { if let Some(label) = desc.label { mapped.label(label); } - Sendable(texture.0.create_view_with_descriptor(&mapped)) + create_identified(texture.0.create_view_with_descriptor(&mapped)) } fn surface_drop(&self, _surface: &Self::SurfaceId) { @@ -2048,7 +2084,7 @@ impl crate::Context for Context { pipeline: 
&Self::ComputePipelineId, index: u32, ) -> Self::BindGroupLayoutId { - Sendable(pipeline.0.get_bind_group_layout(index)) + create_identified(pipeline.0.get_bind_group_layout(index)) } fn render_pipeline_get_bind_group_layout( @@ -2056,7 +2092,7 @@ impl crate::Context for Context { pipeline: &Self::RenderPipelineId, index: u32, ) -> Self::BindGroupLayoutId { - Sendable(pipeline.0.get_bind_group_layout(index)) + create_identified(pipeline.0.get_bind_group_layout(index)) } fn command_encoder_copy_buffer_to_buffer( @@ -2305,7 +2341,7 @@ impl crate::Context for Context { encoder: Self::RenderBundleEncoderId, desc: &crate::RenderBundleDescriptor, ) -> Self::RenderBundleId { - Sendable(match desc.label { + create_identified(match desc.label { Some(label) => { let mut mapped_desc = web_sys::GpuRenderBundleDescriptor::new(); mapped_desc.label(label); diff --git a/wgpu/src/lib.rs b/wgpu/src/lib.rs index 2804244573..ff67365ec6 100644 --- a/wgpu/src/lib.rs +++ b/wgpu/src/lib.rs @@ -2,9 +2,9 @@ //! //! To start using the API, create an [`Instance`]. -#![cfg_attr(docsrs, feature(doc_cfg))] // Allow doc(cfg(feature = "")) for showing in docs that something is feature gated. 
+#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] #![doc(html_logo_url = "https://raw.githubusercontent.com/gfx-rs/wgpu/master/logo.png")] -#![warn(missing_docs)] +#![warn(missing_docs, unsafe_op_in_unsafe_fn)] mod backend; pub mod util; @@ -36,10 +36,10 @@ pub use wgt::{ PresentMode, PrimitiveState, PrimitiveTopology, PushConstantRange, QueryType, RenderBundleDepthStencil, SamplerBindingType, SamplerBorderColor, ShaderLocation, ShaderModel, ShaderStages, StencilFaceState, StencilOperation, StencilState, StorageTextureAccess, - SurfaceConfiguration, SurfaceStatus, TextureAspect, TextureDimension, TextureFormat, - TextureFormatFeatureFlags, TextureFormatFeatures, TextureSampleType, TextureUsages, - TextureViewDimension, VertexAttribute, VertexFormat, VertexStepMode, COPY_BUFFER_ALIGNMENT, - COPY_BYTES_PER_ROW_ALIGNMENT, MAP_ALIGNMENT, PUSH_CONSTANT_ALIGNMENT, + SurfaceCapabilities, SurfaceConfiguration, SurfaceStatus, TextureAspect, TextureDimension, + TextureFormat, TextureFormatFeatureFlags, TextureFormatFeatures, TextureSampleType, + TextureUsages, TextureViewDimension, VertexAttribute, VertexFormat, VertexStepMode, + COPY_BUFFER_ALIGNMENT, COPY_BYTES_PER_ROW_ALIGNMENT, MAP_ALIGNMENT, PUSH_CONSTANT_ALIGNMENT, QUERY_RESOLVE_BUFFER_ALIGNMENT, QUERY_SET_MAX_QUERIES, QUERY_SIZE, VERTEX_STRIDE_ALIGNMENT, }; @@ -164,28 +164,32 @@ trait RenderPassInner: RenderInner { ); } +trait GlobalId { + fn global_id(&self) -> u64; +} + trait Context: Debug + Send + Sized + Sync { - type AdapterId: Debug + Send + Sync + 'static; - type DeviceId: Debug + Send + Sync + 'static; - type QueueId: Debug + Send + Sync + 'static; - type ShaderModuleId: Debug + Send + Sync + 'static; - type BindGroupLayoutId: Debug + Send + Sync + 'static; - type BindGroupId: Debug + Send + Sync + 'static; - type TextureViewId: Debug + Send + Sync + 'static; - type SamplerId: Debug + Send + Sync + 'static; - type BufferId: Debug + Send + Sync + 'static; - type TextureId: Debug + Send + Sync + 'static; - 
type QuerySetId: Debug + Send + Sync + 'static; - type PipelineLayoutId: Debug + Send + Sync + 'static; - type RenderPipelineId: Debug + Send + Sync + 'static; - type ComputePipelineId: Debug + Send + Sync + 'static; + type AdapterId: GlobalId + Debug + Send + Sync + 'static; + type DeviceId: GlobalId + Debug + Send + Sync + 'static; + type QueueId: GlobalId + Debug + Send + Sync + 'static; + type ShaderModuleId: GlobalId + Debug + Send + Sync + 'static; + type BindGroupLayoutId: GlobalId + Debug + Send + Sync + 'static; + type BindGroupId: GlobalId + Debug + Send + Sync + 'static; + type TextureViewId: GlobalId + Debug + Send + Sync + 'static; + type SamplerId: GlobalId + Debug + Send + Sync + 'static; + type BufferId: GlobalId + Debug + Send + Sync + 'static; + type TextureId: GlobalId + Debug + Send + Sync + 'static; + type QuerySetId: GlobalId + Debug + Send + Sync + 'static; + type PipelineLayoutId: GlobalId + Debug + Send + Sync + 'static; + type RenderPipelineId: GlobalId + Debug + Send + Sync + 'static; + type ComputePipelineId: GlobalId + Debug + Send + Sync + 'static; type CommandEncoderId: Debug; type ComputePassId: Debug + ComputePassInner; type RenderPassId: Debug + RenderPassInner; type CommandBufferId: Debug + Send + Sync; type RenderBundleEncoderId: Debug + RenderInner; - type RenderBundleId: Debug + Send + Sync + 'static; - type SurfaceId: Debug + Send + Sync + 'static; + type RenderBundleId: GlobalId + Debug + Send + Sync + 'static; + type SurfaceId: GlobalId + Debug + Send + Sync + 'static; type SurfaceOutputDetail: Send; type SubmissionIndex: Debug + Copy + Clone + Send + 'static; @@ -227,21 +231,11 @@ trait Context: Debug + Send + Sized + Sync { format: TextureFormat, ) -> TextureFormatFeatures; - fn surface_get_supported_formats( - &self, - surface: &Self::SurfaceId, - adapter: &Self::AdapterId, - ) -> Vec; - fn surface_get_supported_present_modes( + fn surface_get_capabilities( &self, surface: &Self::SurfaceId, adapter: &Self::AdapterId, - ) 
-> Vec; - fn surface_get_supported_alpha_modes( - &self, - surface: &Self::SurfaceId, - adapter: &Self::AdapterId, - ) -> Vec; + ) -> wgt::SurfaceCapabilities; fn surface_configure( &self, surface: &Self::SurfaceId, @@ -843,13 +837,11 @@ pub enum ShaderSource<'a> { /// /// See also: [`util::make_spirv`], [`include_spirv`] #[cfg(feature = "spirv")] - #[cfg_attr(docsrs, doc(cfg(feature = "spirv")))] SpirV(Cow<'a, [u32]>), /// GLSL module as a string slice. /// /// Note: GLSL is not yet fully supported and must be a specific ShaderStage. #[cfg(feature = "glsl")] - #[cfg_attr(docsrs, doc(cfg(feature = "glsl")))] Glsl { /// The source code of the shader. shader: Cow<'a, str>, @@ -860,11 +852,9 @@ pub enum ShaderSource<'a> { }, /// WGSL module as a string slice. #[cfg(feature = "wgsl")] - #[cfg_attr(docsrs, doc(cfg(feature = "wgsl")))] Wgsl(Cow<'a, str>), /// Naga module. #[cfg(feature = "naga")] - #[cfg_attr(docsrs, doc(cfg(feature = "naga")))] Naga(Cow<'static, naga::Module>), /// Dummy variant because `Naga` doesn't have a lifetime and without enough active features it /// could be the last one active. @@ -1738,7 +1728,7 @@ impl Instance { #[cfg(any(not(target_arch = "wasm32"), feature = "emscripten"))] pub unsafe fn from_hal(hal_instance: A::Instance) -> Self { Self { - context: Arc::new(C::from_hal_instance::(hal_instance)), + context: Arc::new(unsafe { C::from_hal_instance::(hal_instance) }), } } @@ -1754,7 +1744,7 @@ impl Instance { /// [`Instance`]: hal::Api::Instance #[cfg(any(not(target_arch = "wasm32"), feature = "webgl"))] pub unsafe fn as_hal(&self) -> Option<&A::Instance> { - self.context.instance_as_hal::() + unsafe { self.context.instance_as_hal::() } } /// Create an new instance of wgpu from a wgpu-core instance. 
@@ -1769,7 +1759,7 @@ impl Instance { #[cfg(any(not(target_arch = "wasm32"), feature = "webgl"))] pub unsafe fn from_core(core_instance: wgc::instance::Instance) -> Self { Self { - context: Arc::new(C::from_core_instance(core_instance)), + context: Arc::new(unsafe { C::from_core_instance(core_instance) }), } } @@ -1815,7 +1805,7 @@ impl Instance { hal_adapter: hal::ExposedAdapter, ) -> Adapter { let context = Arc::clone(&self.context); - let id = context.create_adapter_from_hal(hal_adapter); + let id = unsafe { context.create_adapter_from_hal(hal_adapter) }; Adapter { context, id } } @@ -1855,7 +1845,7 @@ impl Instance { &self, layer: *mut std::ffi::c_void, ) -> Surface { - self.context.create_surface_from_core_animation_layer(layer) + unsafe { self.context.create_surface_from_core_animation_layer(layer) } } /// Creates a surface from `IDCompositionVisual`. @@ -1865,7 +1855,7 @@ impl Instance { /// - visual must be a valid IDCompositionVisual to create a surface upon. #[cfg(target_os = "windows")] pub unsafe fn create_surface_from_visual(&self, visual: *mut std::ffi::c_void) -> Surface { - self.context.create_surface_from_visual(visual) + unsafe { self.context.create_surface_from_visual(visual) } } /// Creates a surface from a `web_sys::HtmlCanvasElement`. 
@@ -1978,20 +1968,22 @@ impl Adapter { trace_path: Option<&std::path::Path>, ) -> Result<(Device, Queue), RequestDeviceError> { let context = Arc::clone(&self.context); - self.context - .create_device_from_hal(&self.id, hal_device, desc, trace_path) - .map(|(device_id, queue_id)| { - ( - Device { - context: Arc::clone(&context), - id: device_id, - }, - Queue { - context, - id: queue_id, - }, - ) - }) + unsafe { + self.context + .create_device_from_hal(&self.id, hal_device, desc, trace_path) + } + .map(|(device_id, queue_id)| { + ( + Device { + context: Arc::clone(&context), + id: device_id, + }, + Queue { + context, + id: queue_id, + }, + ) + }) } /// Apply a callback to this `Adapter`'s underlying backend adapter. @@ -2018,8 +2010,10 @@ impl Adapter { &self, hal_adapter_callback: F, ) -> R { - self.context - .adapter_as_hal::(self.id, hal_adapter_callback) + unsafe { + self.context + .adapter_as_hal::(self.id, hal_adapter_callback) + } } /// Returns whether this adapter may present to the passed surface. 
@@ -2119,12 +2113,14 @@ impl Device { ) -> ShaderModule { ShaderModule { context: Arc::clone(&self.context), - id: Context::device_create_shader_module( - &*self.context, - &self.id, - desc, - wgt::ShaderBoundChecks::unchecked(), - ), + id: unsafe { + Context::device_create_shader_module( + &*self.context, + &self.id, + desc, + wgt::ShaderBoundChecks::unchecked(), + ) + }, } } @@ -2142,7 +2138,9 @@ impl Device { ) -> ShaderModule { ShaderModule { context: Arc::clone(&self.context), - id: Context::device_create_shader_module_spirv(&*self.context, &self.id, desc), + id: unsafe { + Context::device_create_shader_module_spirv(&*self.context, &self.id, desc) + }, } } @@ -2252,9 +2250,10 @@ impl Device { ) -> Texture { Texture { context: Arc::clone(&self.context), - id: self - .context - .create_texture_from_hal::(hal_texture, &self.id, desc), + id: unsafe { + self.context + .create_texture_from_hal::(hal_texture, &self.id, desc) + }, owned: true, } } @@ -2326,8 +2325,10 @@ impl Device { &self, hal_device_callback: F, ) -> R { - self.context - .device_as_hal::(&self.id, hal_device_callback) + unsafe { + self.context + .device_as_hal::(&self.id, hal_device_callback) + } } } @@ -2637,8 +2638,10 @@ impl Texture { &self, hal_texture_callback: F, ) { - self.context - .texture_as_hal::(&self.id, hal_texture_callback) + unsafe { + self.context + .texture_as_hal::(&self.id, hal_texture_callback) + } } /// Creates a view of this texture. @@ -3600,13 +3603,23 @@ impl Queue { } } - /// Schedule a data write into `texture`. + /// Schedule a write of some data into a texture. + /// + /// * `data` contains the texels to be written, which must be in + /// [the same format as the texture](TextureFormat). + /// * `data_layout` describes the memory layout of `data`, which does not necessarily + /// have to have tightly packed rows. + /// * `texture` specifies the texture to write into, and the location within the + /// texture (coordinate offset, mip level) that will be overwritten. 
+ /// * `size` is the size, in texels, of the region to be written. /// /// This method is intended to have low performance costs. /// As such, the write is not immediately submitted, and instead enqueued /// internally to happen at the start of the next `submit()` call. + /// However, `data` will be immediately copied into staging memory; so the caller may + /// discard it any time after this call completes. /// - /// This method fails if `data` overruns the size of fragment of `texture` specified with `size`. + /// This method fails if `size` overruns the size of `texture`, or if `data` is too short. pub fn write_texture( &self, texture: ImageCopyTexture, @@ -3617,7 +3630,7 @@ impl Queue { Context::queue_write_texture(&*self.context, &self.id, texture, data, data_layout, size) } - /// Schedule a copy of data from `image` into `texture` + /// Schedule a copy of data from `image` into `texture`. #[cfg(all(target_arch = "wasm32", not(feature = "webgl")))] pub fn copy_external_image_to_texture( &self, @@ -3691,43 +3704,31 @@ impl Drop for SurfaceTexture { } impl Surface { - /// Returns a vec of supported texture formats to use for the [`Surface`] with this adapter. - /// Note: The first format in the vector is preferred - /// - /// Returns an empty vector if the surface is incompatible with the adapter. - pub fn get_supported_formats(&self, adapter: &Adapter) -> Vec { - Context::surface_get_supported_formats(&*self.context, &self.id, &adapter.id) - } - - /// Returns a vec of supported presentation modes to use for the [`Surface`] with this adapter. - /// - /// Returns an empty vector if the surface is incompatible with the adapter. - pub fn get_supported_present_modes(&self, adapter: &Adapter) -> Vec { - Context::surface_get_supported_present_modes(&*self.context, &self.id, &adapter.id) - } - - /// Returns a vec of supported alpha modes to use for the [`Surface`] with this adapter. + /// Returns the capabilities of the surface when used with the given adapter. 
/// - /// Will return at least one element, CompositeAlphaMode::Opaque or CompositeAlphaMode::Inherit. - pub fn get_supported_alpha_modes(&self, adapter: &Adapter) -> Vec { - Context::surface_get_supported_alpha_modes(&*self.context, &self.id, &adapter.id) + /// Returns specified values (see [`SurfaceCapabilities`]) if surface is incompatible with the adapter. + pub fn get_capabilities(&self, adapter: &Adapter) -> SurfaceCapabilities { + Context::surface_get_capabilities(&*self.context, &self.id, &adapter.id) } /// Return a default `SurfaceConfiguration` from width and height to use for the [`Surface`] with this adapter. + /// + /// Returns None if the surface isn't supported by this adapter pub fn get_default_config( &self, adapter: &Adapter, width: u32, height: u32, - ) -> wgt::SurfaceConfiguration { - wgt::SurfaceConfiguration { + ) -> Option { + let caps = self.get_capabilities(adapter); + Some(wgt::SurfaceConfiguration { usage: wgt::TextureUsages::RENDER_ATTACHMENT, - format: self.get_supported_formats(adapter)[0], + format: *caps.formats.get(0)?, width, height, - present_mode: self.get_supported_present_modes(adapter)[0], + present_mode: *caps.present_modes.get(0)?, alpha_mode: wgt::CompositeAlphaMode::Auto, - } + }) } /// Initializes [`Surface`] for presentation. @@ -3773,6 +3774,222 @@ impl Surface { }) .ok_or(SurfaceError::Lost) } + + /// Returns the inner hal Surface using a callback. 
The hal surface will be `None` if the + /// backend type argument does not match with this wgpu Surface + /// + /// # Safety + /// + /// - The raw handle obtained from the hal Surface must not be manually destroyed + #[cfg(any(not(target_arch = "wasm32"), feature = "emscripten"))] + pub unsafe fn as_hal_mut) -> R, R>( + &mut self, + hal_surface_callback: F, + ) -> R { + unsafe { + self.context + .surface_as_hal_mut::(&self.id, hal_surface_callback) + } + } +} + +/// Opaque globally-unique identifier +#[cfg(feature = "expose-ids")] +#[cfg_attr(docsrs, doc(cfg(feature = "expose-ids")))] +#[repr(transparent)] +#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] +pub struct Id(u64); + +#[cfg(feature = "expose-ids")] +impl Adapter { + /// Returns a globally-unique identifier for this `Adapter`. + /// + /// Calling this method multiple times on the same object will always return the same value. + /// The returned value is guaranteed to be different for all resources created from the same `Instance`. + #[cfg_attr(docsrs, doc(cfg(feature = "expose-ids")))] + pub fn global_id(&self) -> Id { + Id(self.id.global_id()) + } +} + +#[cfg(feature = "expose-ids")] +impl Device { + /// Returns a globally-unique identifier for this `Device`. + /// + /// Calling this method multiple times on the same object will always return the same value. + /// The returned value is guaranteed to be different for all resources created from the same `Instance`. + #[cfg_attr(docsrs, doc(cfg(feature = "expose-ids")))] + pub fn global_id(&self) -> Id { + Id(self.id.global_id()) + } +} + +#[cfg(feature = "expose-ids")] +impl Queue { + /// Returns a globally-unique identifier for this `Queue`. + /// + /// Calling this method multiple times on the same object will always return the same value. + /// The returned value is guaranteed to be different for all resources created from the same `Instance`. 
+ #[cfg_attr(docsrs, doc(cfg(feature = "expose-ids")))] + pub fn global_id(&self) -> Id { + Id(self.id.global_id()) + } +} + +#[cfg(feature = "expose-ids")] +impl ShaderModule { + /// Returns a globally-unique identifier for this `ShaderModule`. + /// + /// Calling this method multiple times on the same object will always return the same value. + /// The returned value is guaranteed to be different for all resources created from the same `Instance`. + #[cfg_attr(docsrs, doc(cfg(feature = "expose-ids")))] + pub fn global_id(&self) -> Id { + Id(self.id.global_id()) + } +} + +#[cfg(feature = "expose-ids")] +impl BindGroupLayout { + /// Returns a globally-unique identifier for this `BindGroupLayout`. + /// + /// Calling this method multiple times on the same object will always return the same value. + /// The returned value is guaranteed to be different for all resources created from the same `Instance`. + #[cfg_attr(docsrs, doc(cfg(feature = "expose-ids")))] + pub fn global_id(&self) -> Id { + Id(self.id.global_id()) + } +} + +#[cfg(feature = "expose-ids")] +impl BindGroup { + /// Returns a globally-unique identifier for this `BindGroup`. + /// + /// Calling this method multiple times on the same object will always return the same value. + /// The returned value is guaranteed to be different for all resources created from the same `Instance`. + #[cfg_attr(docsrs, doc(cfg(feature = "expose-ids")))] + pub fn global_id(&self) -> Id { + Id(self.id.global_id()) + } +} + +#[cfg(feature = "expose-ids")] +impl TextureView { + /// Returns a globally-unique identifier for this `TextureView`. + /// + /// Calling this method multiple times on the same object will always return the same value. + /// The returned value is guaranteed to be different for all resources created from the same `Instance`. 
+ #[cfg_attr(docsrs, doc(cfg(feature = "expose-ids")))] + pub fn global_id(&self) -> Id { + Id(self.id.global_id()) + } +} + +#[cfg(feature = "expose-ids")] +impl Sampler { + /// Returns a globally-unique identifier for this `Sampler`. + /// + /// Calling this method multiple times on the same object will always return the same value. + /// The returned value is guaranteed to be different for all resources created from the same `Instance`. + #[cfg_attr(docsrs, doc(cfg(feature = "expose-ids")))] + pub fn global_id(&self) -> Id { + Id(self.id.global_id()) + } +} + +#[cfg(feature = "expose-ids")] +impl Buffer { + /// Returns a globally-unique identifier for this `Buffer`. + /// + /// Calling this method multiple times on the same object will always return the same value. + /// The returned value is guaranteed to be different for all resources created from the same `Instance`. + #[cfg_attr(docsrs, doc(cfg(feature = "expose-ids")))] + pub fn global_id(&self) -> Id { + Id(self.id.global_id()) + } +} + +#[cfg(feature = "expose-ids")] +impl Texture { + /// Returns a globally-unique identifier for this `Texture`. + /// + /// Calling this method multiple times on the same object will always return the same value. + /// The returned value is guaranteed to be different for all resources created from the same `Instance`. + #[cfg_attr(docsrs, doc(cfg(feature = "expose-ids")))] + pub fn global_id(&self) -> Id { + Id(self.id.global_id()) + } +} + +#[cfg(feature = "expose-ids")] +impl QuerySet { + /// Returns a globally-unique identifier for this `QuerySet`. + /// + /// Calling this method multiple times on the same object will always return the same value. + /// The returned value is guaranteed to be different for all resources created from the same `Instance`. 
+ #[cfg_attr(docsrs, doc(cfg(feature = "expose-ids")))] + pub fn global_id(&self) -> Id { + Id(self.id.global_id()) + } +} + +#[cfg(feature = "expose-ids")] +impl PipelineLayout { + /// Returns a globally-unique identifier for this `PipelineLayout`. + /// + /// Calling this method multiple times on the same object will always return the same value. + /// The returned value is guaranteed to be different for all resources created from the same `Instance`. + #[cfg_attr(docsrs, doc(cfg(feature = "expose-ids")))] + pub fn global_id(&self) -> Id { + Id(self.id.global_id()) + } +} + +#[cfg(feature = "expose-ids")] +impl RenderPipeline { + /// Returns a globally-unique identifier for this `RenderPipeline`. + /// + /// Calling this method multiple times on the same object will always return the same value. + /// The returned value is guaranteed to be different for all resources created from the same `Instance`. + #[cfg_attr(docsrs, doc(cfg(feature = "expose-ids")))] + pub fn global_id(&self) -> Id { + Id(self.id.global_id()) + } +} + +#[cfg(feature = "expose-ids")] +impl ComputePipeline { + /// Returns a globally-unique identifier for this `ComputePipeline`. + /// + /// Calling this method multiple times on the same object will always return the same value. + /// The returned value is guaranteed to be different for all resources created from the same `Instance`. + #[cfg_attr(docsrs, doc(cfg(feature = "expose-ids")))] + pub fn global_id(&self) -> Id { + Id(self.id.global_id()) + } +} + +#[cfg(feature = "expose-ids")] +impl RenderBundle { + /// Returns a globally-unique identifier for this `RenderBundle`. + /// + /// Calling this method multiple times on the same object will always return the same value. + /// The returned value is guaranteed to be different for all resources created from the same `Instance`. 
+ #[cfg_attr(docsrs, doc(cfg(feature = "expose-ids")))] + pub fn global_id(&self) -> Id { + Id(self.id.global_id()) + } +} + +#[cfg(feature = "expose-ids")] +impl Surface { + /// Returns a globally-unique identifier for this `Surface`. + /// + /// Calling this method multiple times on the same object will always return the same value. + /// The returned value is guaranteed to be different for all resources created from the same `Instance`. + #[cfg_attr(docsrs, doc(cfg(feature = "expose-ids")))] + pub fn global_id(&self) -> Id { + Id(self.id.global_id()) + } } /// Type for the callback of uncaptured error handler diff --git a/wgpu/tests/buffer.rs b/wgpu/tests/buffer.rs index b1170765ec..ab3d386691 100644 --- a/wgpu/tests/buffer.rs +++ b/wgpu/tests/buffer.rs @@ -1,9 +1,6 @@ use crate::common::{initialize_test, TestParameters, TestingContext}; -use std::sync::{Arc, atomic::{AtomicBool, Ordering}}; fn test_empty_buffer_range(ctx: &TestingContext, buffer_size: u64, label: &str) { - let status = Arc::new(AtomicBool::new(false)); - let r = wgpu::BufferUsages::MAP_READ; let rw = wgpu::BufferUsages::MAP_READ | wgpu::BufferUsages::MAP_WRITE; for usage in [r, rw] { @@ -14,15 +11,10 @@ fn test_empty_buffer_range(ctx: &TestingContext, buffer_size: u64, label: &str) mapped_at_creation: false, }); - let done = status.clone(); - b0.slice(0..0).map_async(wgpu::MapMode::Read, move |result| { - assert!(result.is_ok()); - done.store(true, Ordering::SeqCst); - }); + b0.slice(0..0) + .map_async(wgpu::MapMode::Read, Result::unwrap); - while !status.load(Ordering::SeqCst) { - ctx.device.poll(wgpu::MaintainBase::Poll); - } + ctx.device.poll(wgpu::MaintainBase::Wait); { let view = b0.slice(0..0).get_mapped_range(); @@ -37,30 +29,26 @@ fn test_empty_buffer_range(ctx: &TestingContext, buffer_size: u64, label: &str) // Map multiple times before unmapping. 
b0.slice(0..0).map_async(wgpu::MapMode::Read, move |_| {}); - b0.slice(0..0).map_async(wgpu::MapMode::Read, move |result| { - assert!(result.is_err()); - }); - b0.slice(0..0).map_async(wgpu::MapMode::Read, move |result| { - assert!(result.is_err()); - }); - b0.slice(0..0).map_async(wgpu::MapMode::Read, move |result| { - assert!(result.is_err()); - }); + b0.slice(0..0) + .map_async(wgpu::MapMode::Read, move |result| { + assert!(result.is_err()); + }); + b0.slice(0..0) + .map_async(wgpu::MapMode::Read, move |result| { + assert!(result.is_err()); + }); + b0.slice(0..0) + .map_async(wgpu::MapMode::Read, move |result| { + assert!(result.is_err()); + }); b0.unmap(); - status.store(false, Ordering::SeqCst); - // Write mode. if usage == rw { - let done = status.clone(); - b0.slice(0..0).map_async(wgpu::MapMode::Write, move |result| { - assert!(result.is_ok()); - done.store(true, Ordering::SeqCst); - }); + b0.slice(0..0) + .map_async(wgpu::MapMode::Write, Result::unwrap); - while !status.load(Ordering::SeqCst) { - ctx.device.poll(wgpu::MaintainBase::Poll); - } + ctx.device.poll(wgpu::MaintainBase::Wait); //{ // let view = b0.slice(0..0).get_mapped_range_mut(); @@ -72,11 +60,9 @@ fn test_empty_buffer_range(ctx: &TestingContext, buffer_size: u64, label: &str) // Map and unmap right away. 
b0.slice(0..0).map_async(wgpu::MapMode::Write, move |_| {}); b0.unmap(); - } } - let b1 = ctx.device.create_buffer(&wgpu::BufferDescriptor { label: Some(label), size: buffer_size, @@ -91,18 +77,85 @@ fn test_empty_buffer_range(ctx: &TestingContext, buffer_size: u64, label: &str) b1.unmap(); - for _ in 0..10 { - ctx.device.poll(wgpu::MaintainBase::Poll); - } + ctx.device.poll(wgpu::MaintainBase::Wait); } #[test] +#[ignore] fn empty_buffer() { - initialize_test( - TestParameters::default(), - |ctx| { - test_empty_buffer_range(&ctx, 2048, "regular buffer"); - test_empty_buffer_range(&ctx, 0, "zero-sized buffer"); + // TODO: Currently wgpu does not accept empty buffer slices, which + // is what test is about. + initialize_test(TestParameters::default(), |ctx| { + test_empty_buffer_range(&ctx, 2048, "regular buffer"); + test_empty_buffer_range(&ctx, 0, "zero-sized buffer"); + }) +} + +#[test] +fn test_map_offset() { + initialize_test(TestParameters::default(), |ctx| { + // This test writes 16 bytes at the beginning of buffer mapped mapped with + // an offset of 32 bytes. Then the buffer is copied into another buffer that + // is read back and we check that the written bytes are correctly placed at + // offset 32..48. + // The goal is to check that get_mapped_range did not accidentally double-count + // the mapped offset. + + let write_buf = ctx.device.create_buffer(&wgpu::BufferDescriptor { + label: None, + size: 256, + usage: wgpu::BufferUsages::MAP_WRITE | wgpu::BufferUsages::COPY_SRC, + mapped_at_creation: false, + }); + let read_buf = ctx.device.create_buffer(&wgpu::BufferDescriptor { + label: None, + size: 256, + usage: wgpu::BufferUsages::MAP_READ | wgpu::BufferUsages::COPY_DST, + mapped_at_creation: false, + }); + + write_buf + .slice(32..) 
+ .map_async(wgpu::MapMode::Write, move |result| { + result.unwrap(); + }); + + ctx.device.poll(wgpu::MaintainBase::Wait); + + { + let slice = write_buf.slice(32..48); + let mut view = slice.get_mapped_range_mut(); + for byte in &mut view[..] { + *byte = 2; + } } - ) + + write_buf.unmap(); + + let mut encoder = ctx + .device + .create_command_encoder(&wgpu::CommandEncoderDescriptor { label: None }); + + encoder.copy_buffer_to_buffer(&write_buf, 0, &read_buf, 0, 256); + + ctx.queue.submit(Some(encoder.finish())); + + read_buf + .slice(..) + .map_async(wgpu::MapMode::Read, Result::unwrap); + + ctx.device.poll(wgpu::MaintainBase::Wait); + + let slice = read_buf.slice(..); + let view = slice.get_mapped_range(); + for byte in &view[0..32] { + assert_eq!(*byte, 0); + } + for byte in &view[32..48] { + assert_eq!(*byte, 2); + } + for byte in &view[48..] { + assert_eq!(*byte, 0); + } + }); } diff --git a/wgpu/tests/buffer_usages.rs b/wgpu/tests/buffer_usages.rs index ebf679ca05..db734c108b 100644 --- a/wgpu/tests/buffer_usages.rs +++ b/wgpu/tests/buffer_usages.rs @@ -1,31 +1,31 @@ //! Tests for buffer usages validation. 
+use crate::common::{fail_if, initialize_test, TestParameters}; use wgt::BufferAddress; -use crate::common::{initialize_test, TestParameters}; +const BUFFER_SIZE: BufferAddress = 1234; #[test] fn buffer_usage() { - fn try_create( - usages: &[wgpu::BufferUsages], - enable_mappable_primary_buffers: bool, - should_panic: bool, - ) { + fn try_create(enable_mappable_primary_buffers: bool, usages: &[(bool, &[wgpu::BufferUsages])]) { let mut parameters = TestParameters::default(); if enable_mappable_primary_buffers { parameters = parameters.features(wgpu::Features::MAPPABLE_PRIMARY_BUFFERS); } - if should_panic { - parameters = parameters.failure(); - } initialize_test(parameters, |ctx| { - for usage in usages.iter().copied() { - let _buffer = ctx.device.create_buffer(&wgpu::BufferDescriptor { - label: None, - size: BUFFER_SIZE, - usage, - mapped_at_creation: false, + for (expect_validation_error, usage) in + usages.iter().flat_map(|&(expect_error, usages)| { + usages.iter().copied().map(move |u| (expect_error, u)) + }) + { + fail_if(&ctx.device, expect_validation_error, || { + let _buffer = ctx.device.create_buffer(&wgpu::BufferDescriptor { + label: None, + size: BUFFER_SIZE, + usage, + mapped_at_creation: false, + }); }); } }); @@ -33,7 +33,7 @@ fn buffer_usage() { use wgpu::BufferUsages as Bu; - let always_valid = [ + let always_valid = &[ Bu::MAP_READ, Bu::MAP_WRITE, Bu::MAP_READ | Bu::COPY_DST, @@ -41,25 +41,32 @@ fn buffer_usage() { ]; // MAP_READ can only be paired with COPY_DST and MAP_WRITE can only be paired with COPY_SRC // (unless Features::MAPPABlE_PRIMARY_BUFFERS is enabled). 
- let needs_mappable_primary_buffers = [ + let needs_mappable_primary_buffers = &[ Bu::MAP_READ | Bu::COPY_DST | Bu::COPY_SRC, Bu::MAP_WRITE | Bu::COPY_SRC | Bu::COPY_DST, Bu::MAP_READ | Bu::MAP_WRITE, Bu::MAP_WRITE | Bu::MAP_READ, Bu::MAP_READ | Bu::COPY_DST | Bu::STORAGE, Bu::MAP_WRITE | Bu::COPY_SRC | Bu::STORAGE, - wgpu::BufferUsages::all(), + Bu::all(), ]; - let always_fail = [Bu::empty()]; - - try_create(&always_valid, false, false); - try_create(&always_valid, true, false); + let invalid_bits = unsafe { Bu::from_bits_unchecked(0b1111111111111) }; + let always_fail = &[Bu::empty(), invalid_bits]; - try_create(&needs_mappable_primary_buffers, false, true); - try_create(&needs_mappable_primary_buffers, true, false); - - try_create(&always_fail, false, true); - try_create(&always_fail, true, true); + try_create( + false, + &[ + (false, always_valid), + (true, needs_mappable_primary_buffers), + (true, always_fail), + ], + ); + try_create( + true, // enable Features::MAPPABLE_PRIMARY_BUFFERS + &[ + (false, always_valid), + (false, needs_mappable_primary_buffers), + (true, always_fail), + ], + ); } - -const BUFFER_SIZE: BufferAddress = 1234; diff --git a/wgpu/tests/clear_texture.rs b/wgpu/tests/clear_texture.rs index 9abf7e043d..abe86cac43 100644 --- a/wgpu/tests/clear_texture.rs +++ b/wgpu/tests/clear_texture.rs @@ -44,6 +44,7 @@ static TEXTURE_FORMATS_DEPTH: &[wgpu::TextureFormat] = &[ //wgpu::TextureFormat::Stencil8, wgpu::TextureFormat::Depth16Unorm, wgpu::TextureFormat::Depth24Plus, + wgpu::TextureFormat::Depth24PlusStencil8, ]; // needs TEXTURE_COMPRESSION_BC @@ -329,22 +330,6 @@ fn clear_texture_d32_s8() { ) } -#[test] -fn clear_texture_d24_s8() { - initialize_test( - TestParameters::default() - .features(wgpu::Features::CLEAR_TEXTURE | wgpu::Features::DEPTH24PLUS_STENCIL8), - |ctx| { - clear_texture_tests( - &ctx, - &[wgpu::TextureFormat::Depth24PlusStencil8], - false, - false, - ); - }, - ) -} - #[test] fn clear_texture_2d_bc() { initialize_test( diff 
--git a/wgpu/tests/common/mod.rs b/wgpu/tests/common/mod.rs index 9b12f46726..dedf80f5a3 100644 --- a/wgpu/tests/common/mod.rs +++ b/wgpu/tests/common/mod.rs @@ -177,7 +177,7 @@ pub fn initialize_test(parameters: TestParameters, test_function: impl FnOnce(Te backend_bits, None, )) - .expect("could not find sutable adapter on the system"); + .expect("could not find suitable adapter on the system"); let adapter_info = adapter.get_info(); let adapter_lowercase_name = adapter_info.name.to_lowercase(); @@ -278,7 +278,13 @@ pub fn initialize_test(parameters: TestParameters, test_function: impl FnOnce(Te } let panicked = catch_unwind(AssertUnwindSafe(|| test_function(context))).is_err(); - let canary_set = hal::VALIDATION_CANARY.get_and_reset(); + cfg_if::cfg_if!( + if #[cfg(any(not(target_arch = "wasm32"), target_os = "emscripten"))] { + let canary_set = hal::VALIDATION_CANARY.get_and_reset(); + } else { + let canary_set = false; + } + ); let failed = panicked || canary_set; @@ -325,3 +331,13 @@ pub fn valid(device: &wgpu::Device, callback: impl FnOnce() -> T) -> T { result } + +// Run some code in an error scope and assert that validation succeeds or fails depending on the +// provided `should_fail` boolean. +pub fn fail_if(device: &wgpu::Device, should_fail: bool, callback: impl FnOnce() -> T) -> T { + if should_fail { + fail(device, callback) + } else { + valid(device, callback) + } +} diff --git a/wgpu/tests/queue_transfer.rs b/wgpu/tests/queue_transfer.rs new file mode 100644 index 0000000000..7724c291cd --- /dev/null +++ b/wgpu/tests/queue_transfer.rs @@ -0,0 +1,49 @@ +//! Tests for buffer copy validation. 
+ +use std::num::NonZeroU32; + +use crate::common::{fail, initialize_test, TestParameters}; + +#[test] +fn queue_write_texture_overflow() { + initialize_test(TestParameters::default(), |ctx| { + let texture = ctx.device.create_texture(&wgpu::TextureDescriptor { + label: None, + size: wgpu::Extent3d { + width: 146, + height: 25, + depth_or_array_layers: 192, + }, + mip_level_count: 1, + sample_count: 1, + dimension: wgpu::TextureDimension::D2, + format: wgpu::TextureFormat::Rgba32Float, + usage: wgpu::TextureUsages::COPY_DST, + }); + + let data = vec![255; 128]; + + fail(&ctx.device, || { + ctx.queue.write_texture( + wgpu::ImageCopyTexture { + texture: &texture, + mip_level: 0, + origin: wgpu::Origin3d::ZERO, + aspect: wgpu::TextureAspect::All, + }, + &data, + wgpu::ImageDataLayout { + offset: 0, + bytes_per_row: NonZeroU32::new(879161360), + //bytes_per_image: 4294967295, + rows_per_image: NonZeroU32::new(4294967295 / 879161360), + }, + wgpu::Extent3d { + width: 3056263286, + height: 64, + depth_or_array_layers: 1144576469, + }, + ); + }); + }); +} diff --git a/wgpu/tests/root.rs b/wgpu/tests/root.rs index d41032eb10..e721e3a7a7 100644 --- a/wgpu/tests/root.rs +++ b/wgpu/tests/root.rs @@ -1,6 +1,7 @@ // All files containing tests mod common; +mod buffer; mod buffer_copy; mod buffer_usages; mod clear_texture; @@ -9,8 +10,10 @@ mod encoder; mod example_wgsl; mod instance; mod poll; +mod queue_transfer; mod resource_descriptor_accessor; mod resource_error; +mod shader; mod shader_primitive_index; mod texture_bounds; mod transfer; diff --git a/wgpu/tests/shader/mod.rs b/wgpu/tests/shader/mod.rs new file mode 100644 index 0000000000..c1cca4e4d7 --- /dev/null +++ b/wgpu/tests/shader/mod.rs @@ -0,0 +1,371 @@ +//! Infrastructure for testing particular behavior of shaders across platforms. +//! +//! The tests take the form of a input buffer filled with u32 data. A compute +//! shader is run on the input buffer which generates an output buffer. This +//! 
buffer is then read and compared to a given output. + +use std::{borrow::Cow, fmt::Debug}; + +use wgpu::{ + Backends, BindGroupDescriptor, BindGroupEntry, BindGroupLayoutDescriptor, BindGroupLayoutEntry, + BindingType, BufferDescriptor, BufferUsages, CommandEncoderDescriptor, ComputePassDescriptor, + ComputePipelineDescriptor, Maintain, MapMode, PipelineLayoutDescriptor, PushConstantRange, + ShaderModuleDescriptor, ShaderSource, ShaderStages, +}; + +use crate::common::TestingContext; + +mod numeric_builtins; +mod struct_layout; + +#[derive(Clone, Copy, PartialEq)] +enum InputStorageType { + Uniform, + Storage, + PushConstant, +} + +impl InputStorageType { + fn as_str(&self) -> &'static str { + match self { + InputStorageType::Uniform => "uniform", + InputStorageType::Storage => "storage", + InputStorageType::PushConstant => "push_constant", + } + } +} + +/// Describes a single test of a shader. +struct ShaderTest { + /// Human readable name + name: String, + /// This text will be the body of the `Input` struct. Replaces "{{input_members}}" + /// in the shader_test shader. + custom_struct_members: String, + /// This text will be the body of the compute shader. Replaces "{{body}}" + /// in the shader_test shader. + body: String, + /// This text will be the input type of the compute shader. Replaces "{{input_type}}". + /// + /// Defaults to "CustomStruct" + input_type: String, + /// This text will be the output type of the compute shader. Replaces "{{output_type}}". + /// + /// Defaults to "array". + output_type: String, + /// List of values will be written to the input buffer. + input_values: Vec, + /// List of lists of valid expected outputs from the shader. + output_values: Vec>, + /// Function which compares the output values to the resulting values and + /// prints a message on failure. + /// + /// Defaults [`Self::default_comparison_function`]. + output_comparison_fn: fn(&str, &[u32], &[Vec]) -> bool, + /// Value to pre-initialize the output buffer to. 
Often u32::MAX so + /// that writing a 0 looks different than not writing a value at all. + /// + /// Defaults to u32::MAX. + output_initialization: u32, + /// Which backends this test will fail on. If the test passes on this + /// backend when it shouldn't, an assert will be raised. + /// + /// Defaults to Backends::empty(). + failures: Backends, +} +impl ShaderTest { + fn default_comparison_function( + test_name: &str, + actual_values: &[u32], + expected_values: &[Vec], + ) -> bool { + let cast_actual = bytemuck::cast_slice::(actual_values); + + // When printing the error message, we want to trim `cast_actual` to the length + // of the longest set of expected values. This tracks that value. + let mut max_relevant_value_count = 0; + + for expected in expected_values { + let cast_expected = bytemuck::cast_slice::(expected); + + // We shorten the actual to the length of the expected. + if &cast_actual[0..cast_expected.len()] == cast_expected { + return true; + } + + max_relevant_value_count = max_relevant_value_count.max(cast_expected.len()); + } + + // We haven't found a match, lets print an error. + + eprint!( + "Inner test failure. Actual {:?}. Expected", + &cast_actual[0..max_relevant_value_count] + ); + + if expected_values.len() != 1 { + eprint!(" one of: "); + } else { + eprint!(": "); + } + + for (idx, expected) in expected_values.iter().enumerate() { + let cast_expected = bytemuck::cast_slice::(expected); + eprint!("{cast_expected:?}"); + if idx + 1 != expected_values.len() { + eprint!(" "); + } + } + + eprintln!(". 
Test {test_name}"); + + false + } + + fn new( + name: String, + custom_struct_members: String, + body: String, + input_values: &[I], + output_values: &[O], + ) -> Self { + Self { + name, + custom_struct_members, + body, + input_type: String::from("CustomStruct"), + output_type: String::from("array"), + input_values: bytemuck::cast_slice(input_values).to_vec(), + output_values: vec![bytemuck::cast_slice(output_values).to_vec()], + output_comparison_fn: Self::default_comparison_function::, + output_initialization: u32::MAX, + failures: Backends::empty(), + } + } + + /// Add another set of possible outputs. If any of the given + /// output values are seen it's considered a success (i.e. this is OR, not AND). + /// + /// Assumes that this type O is the same as the O provided to new. + fn extra_output_values( + mut self, + output_values: &[O], + ) -> Self { + self.output_values + .push(bytemuck::cast_slice(output_values).to_vec()); + + self + } + + fn failures(mut self, failures: Backends) -> Self { + self.failures = failures; + + self + } +} + +const MAX_BUFFER_SIZE: u64 = 128; + +/// Runs the given shader tests with the given storage_type for the input_buffer. +fn shader_input_output_test( + ctx: TestingContext, + storage_type: InputStorageType, + tests: Vec, +) { + let source = String::from(include_str!("shader_test.wgsl")); + + let bgl = ctx + .device + .create_bind_group_layout(&BindGroupLayoutDescriptor { + label: None, + entries: &[ + BindGroupLayoutEntry { + binding: 0, + visibility: ShaderStages::COMPUTE, + ty: BindingType::Buffer { + // We don't use this buffer for push constants, but for simplicity + // we just use the storage buffer binding. 
+ ty: match storage_type { + InputStorageType::Uniform => wgpu::BufferBindingType::Uniform, + InputStorageType::Storage | InputStorageType::PushConstant => { + wgpu::BufferBindingType::Storage { read_only: true } + } + }, + has_dynamic_offset: false, + min_binding_size: None, + }, + count: None, + }, + BindGroupLayoutEntry { + binding: 1, + visibility: ShaderStages::COMPUTE, + ty: BindingType::Buffer { + ty: wgpu::BufferBindingType::Storage { read_only: false }, + has_dynamic_offset: false, + min_binding_size: None, + }, + count: None, + }, + ], + }); + + let input_buffer = ctx.device.create_buffer(&BufferDescriptor { + label: Some("input buffer"), + size: MAX_BUFFER_SIZE, + usage: BufferUsages::COPY_DST | BufferUsages::UNIFORM | BufferUsages::STORAGE, + mapped_at_creation: false, + }); + + let output_buffer = ctx.device.create_buffer(&BufferDescriptor { + label: Some("output buffer"), + size: MAX_BUFFER_SIZE, + usage: BufferUsages::COPY_DST | BufferUsages::COPY_SRC | BufferUsages::STORAGE, + mapped_at_creation: false, + }); + + let mapping_buffer = ctx.device.create_buffer(&BufferDescriptor { + label: Some("mapping buffer"), + size: MAX_BUFFER_SIZE, + usage: BufferUsages::COPY_DST | BufferUsages::MAP_READ, + mapped_at_creation: false, + }); + + let bg = ctx.device.create_bind_group(&BindGroupDescriptor { + label: None, + layout: &bgl, + entries: &[ + BindGroupEntry { + binding: 0, + resource: input_buffer.as_entire_binding(), + }, + BindGroupEntry { + binding: 1, + resource: output_buffer.as_entire_binding(), + }, + ], + }); + + let pll = ctx + .device + .create_pipeline_layout(&PipelineLayoutDescriptor { + label: None, + bind_group_layouts: &[&bgl], + push_constant_ranges: match storage_type { + InputStorageType::PushConstant => &[PushConstantRange { + stages: ShaderStages::COMPUTE, + range: 0..MAX_BUFFER_SIZE as u32, + }], + _ => &[], + }, + }); + + let mut fail = false; + for test in tests { + assert!(test.input_values.len() <= MAX_BUFFER_SIZE as usize / 4); + 
assert!(test.output_values.len() <= MAX_BUFFER_SIZE as usize / 4); + + let test_name = test.name; + + // -- Building shader + pipeline -- + + // This isn't terribly efficient but the string is short and it's a test. + // The body and input members are the longest part, so do them last. + let mut processed = source + .replace("{{storage_type}}", storage_type.as_str()) + .replace("{{input_type}}", &test.input_type) + .replace("{{output_type}}", &test.output_type) + .replace("{{input_members}}", &test.custom_struct_members) + .replace("{{body}}", &test.body); + + // Add the bindings for all inputs besides push constants. + processed = if matches!(storage_type, InputStorageType::PushConstant) { + processed.replace("{{input_bindings}}", "") + } else { + processed.replace("{{input_bindings}}", "@group(0) @binding(0)") + }; + + let sm = ctx.device.create_shader_module(ShaderModuleDescriptor { + label: Some(&format!("shader {test_name}")), + source: ShaderSource::Wgsl(Cow::Borrowed(&processed)), + }); + + let pipeline = ctx + .device + .create_compute_pipeline(&ComputePipelineDescriptor { + label: Some(&format!("pipeline {test_name}")), + layout: Some(&pll), + module: &sm, + entry_point: "cs_main", + }); + + // -- Initializing data -- + + let output_pre_init_data = vec![test.output_initialization; MAX_BUFFER_SIZE as usize / 4]; + ctx.queue.write_buffer( + &output_buffer, + 0, + bytemuck::cast_slice(&output_pre_init_data), + ); + + match storage_type { + InputStorageType::Uniform | InputStorageType::Storage => { + ctx.queue + .write_buffer(&input_buffer, 0, bytemuck::cast_slice(&test.input_values)); + } + _ => { + // Init happens in the compute pass + } + } + + // -- Run test -- + + let mut encoder = ctx + .device + .create_command_encoder(&CommandEncoderDescriptor { label: None }); + + let mut cpass = encoder.begin_compute_pass(&ComputePassDescriptor { + label: Some(&format!("cpass {test_name}")), + }); + cpass.set_pipeline(&pipeline); + cpass.set_bind_group(0, &bg, &[]); 
+ + if let InputStorageType::PushConstant = storage_type { + cpass.set_push_constants(0, bytemuck::cast_slice(&test.input_values)) + } + + cpass.dispatch_workgroups(1, 1, 1); + drop(cpass); + + // -- Pulldown data -- + + encoder.copy_buffer_to_buffer(&output_buffer, 0, &mapping_buffer, 0, MAX_BUFFER_SIZE); + + ctx.queue.submit(Some(encoder.finish())); + + mapping_buffer.slice(..).map_async(MapMode::Read, |_| ()); + ctx.device.poll(Maintain::Wait); + + let mapped = mapping_buffer.slice(..).get_mapped_range(); + + let typed: &[u32] = bytemuck::cast_slice(&mapped); + + // -- Check results -- + + let failure = !(test.output_comparison_fn)(&test_name, typed, &test.output_values); + // We don't immediately panic to let all tests execute + if failure + != test + .failures + .contains(ctx.adapter.get_info().backend.into()) + { + fail |= true; + if !failure { + eprintln!("Unexpected test success. Test {test_name}"); + } + } + + drop(mapped); + mapping_buffer.unmap(); + } + assert!(!fail); +} diff --git a/wgpu/tests/shader/numeric_builtins.rs b/wgpu/tests/shader/numeric_builtins.rs new file mode 100644 index 0000000000..83b278cfbf --- /dev/null +++ b/wgpu/tests/shader/numeric_builtins.rs @@ -0,0 +1,56 @@ +use wgpu::{DownlevelFlags, Limits}; + +use crate::{ + common::{initialize_test, TestParameters}, + shader::{shader_input_output_test, InputStorageType, ShaderTest}, +}; + +fn create_numeric_builtin_test() -> Vec { + let mut tests = Vec::new(); + + #[rustfmt::skip] + let clamp_values: &[(f32, f32, f32, &[f32])] = &[ + // value - low - high - valid outputs + + // normal clamps + ( 20.0, 0.0, 10.0, &[10.0]), + ( -10.0, 0.0, 10.0, &[0.0]), + ( 5.0, 0.0, 10.0, &[5.0]), + + // med-of-three or min/max + ( 3.0, 2.0, 1.0, &[1.0, 2.0]), + ]; + + for &(input, low, high, output) in clamp_values { + let mut test = ShaderTest::new( + format!("clamp({input}, 0.0, 10.0) == {output:?})"), + String::from("value: f32, low: f32, high: f32"), + String::from("output[0] = 
bitcast(clamp(input.value, input.low, input.high));"), + &[input, low, high], + &[output[0]], + ); + for &extra in &output[1..] { + test = test.extra_output_values(&[extra]); + } + + tests.push(test); + } + + tests +} + +#[test] +fn numeric_builtins() { + initialize_test( + TestParameters::default() + .downlevel_flags(DownlevelFlags::COMPUTE_SHADERS) + .limits(Limits::downlevel_defaults()), + |ctx| { + shader_input_output_test( + ctx, + InputStorageType::Storage, + create_numeric_builtin_test(), + ); + }, + ); +} diff --git a/wgpu/tests/shader/shader_test.wgsl b/wgpu/tests/shader/shader_test.wgsl new file mode 100644 index 0000000000..efe8692bd5 --- /dev/null +++ b/wgpu/tests/shader/shader_test.wgsl @@ -0,0 +1,14 @@ +struct CustomStruct { + {{input_members}} +} + +{{input_bindings}} +var<{{storage_type}}> input: {{input_type}}; + +@group(0) @binding(1) +var output: {{output_type}}; + +@compute @workgroup_size(1) +fn cs_main() { + {{body}} +} diff --git a/wgpu/tests/shader/struct_layout.rs b/wgpu/tests/shader/struct_layout.rs new file mode 100644 index 0000000000..2250143b5f --- /dev/null +++ b/wgpu/tests/shader/struct_layout.rs @@ -0,0 +1,229 @@ +use std::fmt::Write; + +use wgpu::{Backends, DownlevelFlags, Features, Limits}; + +use crate::{ + common::{initialize_test, TestParameters}, + shader::{shader_input_output_test, InputStorageType, ShaderTest, MAX_BUFFER_SIZE}, +}; + +fn create_struct_layout_tests(storage_type: InputStorageType) -> Vec { + let input_values: Vec<_> = (0..(MAX_BUFFER_SIZE as u32 / 4)).collect(); + + let mut tests = Vec::new(); + + // Vector tests + for components in [2, 3, 4] { + for ty in ["f32", "u32", "i32"] { + let input_members = format!("member: vec{components}<{ty}>,"); + // There's 2 possible ways to load a component of a vector: + // - Do `input.member.x` (direct) + // - Store `input.member` in a variable; do `var.x` (loaded) + let mut direct = String::new(); + let mut loaded = String::from("let loaded = input.member;"); + let 
component_accessors = ["x", "y", "z", "w"] + .into_iter() + .take(components) + .enumerate(); + for (idx, component) in component_accessors { + writeln!( + direct, + "output[{idx}] = bitcast(input.member.{component});" + ) + .unwrap(); + writeln!(loaded, "output[{idx}] = bitcast(loaded.{component});").unwrap(); + } + + tests.push(ShaderTest::new( + format!("vec{components}<{ty}> - direct"), + input_members.clone(), + direct, + &input_values, + &(0..components as u32).collect::>(), + )); + + tests.push(ShaderTest::new( + format!("vec{components}<{ty}> - loaded"), + input_members.clone(), + loaded, + &input_values, + &(0..components as u32).collect::>(), + )); + } + } + + // Matrix tests + for columns in [2, 3, 4] { + for rows in [2, 3, 4] { + let ty = format!("mat{columns}x{rows}"); + let input_members = format!("member: {ty},"); + // There's 3 possible ways to load a component of a matrix: + // - Do `input.member[0].x` (direct) + // - Store `input.member[0]` in a variable; do `var.x` (vector_loaded) + // - Store `input.member` in a variable; do `var[0].x` (fully_loaded) + let mut direct = String::new(); + let mut vector_loaded = String::new(); + let mut fully_loaded = String::from("let loaded = input.member;"); + for column in 0..columns { + writeln!(vector_loaded, "let vec_{column} = input.member[{column}];").unwrap(); + } + + let mut output_values = Vec::new(); + + let mut current_output_idx = 0; + let mut current_input_idx = 0; + for column in 0..columns { + let component_accessors = ["x", "y", "z", "w"].into_iter().take(rows); + for component in component_accessors { + writeln!( + direct, + "output[{current_output_idx}] = bitcast(input.member[{column}].{component});" + ) + .unwrap(); + writeln!( + vector_loaded, + "output[{current_output_idx}] = bitcast(vec_{column}.{component});" + ) + .unwrap(); + writeln!( + fully_loaded, + "output[{current_output_idx}] = bitcast(loaded[{column}].{component});" + ) + .unwrap(); + + output_values.push(current_input_idx); + 
current_input_idx += 1; + current_output_idx += 1; + } + // Round to next vec4 if we're matrices with vec3 columns + if rows == 3 { + current_input_idx += 1; + } + } + + // https://github.com/gfx-rs/naga/issues/1785 + let failures = if storage_type == InputStorageType::Uniform && rows == 2 { + Backends::GL + } else { + Backends::empty() + }; + + tests.push( + ShaderTest::new( + format!("{ty} - direct"), + input_members.clone(), + direct, + &input_values, + &output_values, + ) + .failures(failures), + ); + + tests.push( + ShaderTest::new( + format!("{ty} - vector loaded"), + input_members.clone(), + vector_loaded, + &input_values, + &output_values, + ) + .failures(failures), + ); + + tests.push( + ShaderTest::new( + format!("{ty} - fully loaded"), + input_members.clone(), + fully_loaded, + &input_values, + &output_values, + ) + .failures(failures), + ); + } + } + + // Vec3 alignment tests + for ty in ["f32", "u32", "i32"] { + let members = format!("_vec: vec3<{ty}>,\nscalar: {ty},"); + let direct = String::from("output[0] = bitcast(input.scalar);"); + + tests.push(ShaderTest::new( + format!("vec3<{ty}>, {ty} alignment"), + members, + direct, + &input_values, + &[3], + )); + } + + // Mat3 alignment tests + for ty in ["f32", "u32", "i32"] { + for columns in [2, 3, 4] { + let members = format!("_mat: mat{columns}x3,\nscalar: {ty},"); + let direct = String::from("output[0] = bitcast(input.scalar);"); + + tests.push(ShaderTest::new( + format!("mat{columns}x3, {ty} alignment"), + members, + direct, + &input_values, + &[columns * 4], + )); + } + } + + tests +} + +#[test] +fn uniform_input() { + initialize_test( + TestParameters::default() + .downlevel_flags(DownlevelFlags::COMPUTE_SHADERS) + .limits(Limits::downlevel_defaults()), + |ctx| { + shader_input_output_test( + ctx, + InputStorageType::Uniform, + create_struct_layout_tests(InputStorageType::Uniform), + ); + }, + ); +} + +#[test] +fn storage_input() { + initialize_test( + TestParameters::default() + 
.downlevel_flags(DownlevelFlags::COMPUTE_SHADERS) + .limits(Limits::downlevel_defaults()), + |ctx| { + shader_input_output_test( + ctx, + InputStorageType::Storage, + create_struct_layout_tests(InputStorageType::Storage), + ); + }, + ); +} + +#[test] +fn push_constant_input() { + initialize_test( + TestParameters::default() + .features(Features::PUSH_CONSTANTS) + .downlevel_flags(DownlevelFlags::COMPUTE_SHADERS) + .limits(Limits { + max_push_constant_size: MAX_BUFFER_SIZE as u32, + ..Limits::downlevel_defaults() + }), + |ctx| { + shader_input_output_test( + ctx, + InputStorageType::PushConstant, + create_struct_layout_tests(InputStorageType::PushConstant), + ); + }, + ); +} diff --git a/wgpu/tests/shader_primitive_index/mod.rs b/wgpu/tests/shader_primitive_index/mod.rs index 41902f7225..5e6c6b1b70 100644 --- a/wgpu/tests/shader_primitive_index/mod.rs +++ b/wgpu/tests/shader_primitive_index/mod.rs @@ -237,7 +237,7 @@ fn capture_rgba_u8_texture( let slice = output_buffer.slice(..); slice.map_async(wgpu::MapMode::Read, |_| ()); ctx.device.poll(wgpu::Maintain::Wait); - let data: Vec = bytemuck::cast_slice(&*slice.get_mapped_range()).to_vec(); + let data: Vec = bytemuck::cast_slice(&slice.get_mapped_range()).to_vec(); // Chunk rows from output buffer, take actual pixel // bytes from each row and flatten into a vector. 
data.chunks_exact(bytes_per_row as usize) diff --git a/wgpu/tests/vertex_indices/mod.rs b/wgpu/tests/vertex_indices/mod.rs index d76b436784..177b857448 100644 --- a/wgpu/tests/vertex_indices/mod.rs +++ b/wgpu/tests/vertex_indices/mod.rs @@ -125,7 +125,7 @@ fn pulling_common( let slice = buffer.slice(..); slice.map_async(wgpu::MapMode::Read, |_| ()); ctx.device.poll(wgpu::Maintain::Wait); - let data: Vec = bytemuck::cast_slice(&*slice.get_mapped_range()).to_vec(); + let data: Vec = bytemuck::cast_slice(&slice.get_mapped_range()).to_vec(); assert_eq!(data, expected); } diff --git a/wgpu/tests/zero_init_texture_after_discard.rs b/wgpu/tests/zero_init_texture_after_discard.rs index 9307bc7fbd..6043d0388f 100644 --- a/wgpu/tests/zero_init_texture_after_discard.rs +++ b/wgpu/tests/zero_init_texture_after_discard.rs @@ -110,58 +110,55 @@ fn discarding_depth_target_resets_texture_init_state_check_visible_on_copy_in_sa #[test] fn discarding_either_depth_or_stencil_aspect() { - initialize_test( - TestParameters::default().features(wgpu::Features::DEPTH24PLUS_STENCIL8), - |ctx| { - let (texture, _) = create_white_texture_and_readback_buffer( - &ctx, - wgpu::TextureFormat::Depth24PlusStencil8, - ); - // TODO: How do we test this other than "doesn't crash"? We can't copy the texture to/from buffers, so we would need to do a copy in a shader - { - let mut encoder = ctx - .device - .create_command_encoder(&wgpu::CommandEncoderDescriptor::default()); - encoder.begin_render_pass(&wgpu::RenderPassDescriptor { - label: Some("Depth Discard, Stencil Load"), - color_attachments: &[], - depth_stencil_attachment: Some(wgpu::RenderPassDepthStencilAttachment { - view: &texture.create_view(&wgpu::TextureViewDescriptor::default()), - depth_ops: Some(wgpu::Operations { - load: wgpu::LoadOp::Load, - store: false, // discard! 
- }), - stencil_ops: Some(wgpu::Operations { - load: wgpu::LoadOp::Clear(0), - store: true, - }), + initialize_test(TestParameters::default(), |ctx| { + let (texture, _) = create_white_texture_and_readback_buffer( + &ctx, + wgpu::TextureFormat::Depth24PlusStencil8, + ); + // TODO: How do we test this other than "doesn't crash"? We can't copy the texture to/from buffers, so we would need to do a copy in a shader + { + let mut encoder = ctx + .device + .create_command_encoder(&wgpu::CommandEncoderDescriptor::default()); + encoder.begin_render_pass(&wgpu::RenderPassDescriptor { + label: Some("Depth Discard, Stencil Load"), + color_attachments: &[], + depth_stencil_attachment: Some(wgpu::RenderPassDepthStencilAttachment { + view: &texture.create_view(&wgpu::TextureViewDescriptor::default()), + depth_ops: Some(wgpu::Operations { + load: wgpu::LoadOp::Load, + store: false, // discard! }), - }); - ctx.queue.submit([encoder.finish()]); - } - { - let mut encoder = ctx - .device - .create_command_encoder(&wgpu::CommandEncoderDescriptor::default()); - encoder.begin_render_pass(&wgpu::RenderPassDescriptor { - label: Some("Depth Load, Stencil Discard"), - color_attachments: &[], - depth_stencil_attachment: Some(wgpu::RenderPassDepthStencilAttachment { - view: &texture.create_view(&wgpu::TextureViewDescriptor::default()), - depth_ops: Some(wgpu::Operations { - load: wgpu::LoadOp::Clear(0.0), - store: true, - }), - stencil_ops: Some(wgpu::Operations { - load: wgpu::LoadOp::Load, - store: false, // discard! 
- }), + stencil_ops: Some(wgpu::Operations { + load: wgpu::LoadOp::Clear(0), + store: true, }), - }); - ctx.queue.submit([encoder.finish()]); - } - }, - ); + }), + }); + ctx.queue.submit([encoder.finish()]); + } + { + let mut encoder = ctx + .device + .create_command_encoder(&wgpu::CommandEncoderDescriptor::default()); + encoder.begin_render_pass(&wgpu::RenderPassDescriptor { + label: Some("Depth Load, Stencil Discard"), + color_attachments: &[], + depth_stencil_attachment: Some(wgpu::RenderPassDepthStencilAttachment { + view: &texture.create_view(&wgpu::TextureViewDescriptor::default()), + depth_ops: Some(wgpu::Operations { + load: wgpu::LoadOp::Clear(0.0), + store: true, + }), + stencil_ops: Some(wgpu::Operations { + load: wgpu::LoadOp::Load, + store: false, // discard! + }), + }), + }); + ctx.queue.submit([encoder.finish()]); + } + }); } const TEXTURE_SIZE: wgpu::Extent3d = wgpu::Extent3d {