| // Copyright 2017 The Dawn Authors |
| // |
| // Licensed under the Apache License, Version 2.0 (the "License"); |
| // you may not use this file except in compliance with the License. |
| // You may obtain a copy of the License at |
| // |
| // http://www.apache.org/licenses/LICENSE-2.0 |
| // |
| // Unless required by applicable law or agreed to in writing, software |
| // distributed under the License is distributed on an "AS IS" BASIS, |
| // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
| // See the License for the specific language governing permissions and |
| // limitations under the License. |
| |
| #include "tests/unittests/validation/ValidationTest.h" |
| |
| #include "common/Assert.h" |
| #include "common/Constants.h" |
| #include "utils/ComboRenderPipelineDescriptor.h" |
| #include "utils/WGPUHelpers.h" |
| |
| class BindGroupValidationTest : public ValidationTest { |
| public: |
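| // Creates a 16x16, single-mip 2D texture (with |layerCount| array layers) of the given usage |
| // and format. |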
| wgpu::Texture CreateTexture(wgpu::TextureUsage usage, |
| wgpu::TextureFormat format, |
| uint32_t layerCount) { |
| wgpu::TextureDescriptor descriptor; |
| descriptor.dimension = wgpu::TextureDimension::e2D; |
| descriptor.size = {16, 16, layerCount}; |
| descriptor.sampleCount = 1; |
| descriptor.mipLevelCount = 1; |
| descriptor.usage = usage; |
| descriptor.format = format; |
| |
| return device.CreateTexture(&descriptor); |
| } |
| |
| void SetUp() override { |
| ValidationTest::SetUp(); |
| |
| // Create objects to use as resources inside test bind groups. |
| { |
| wgpu::BufferDescriptor descriptor; |
| descriptor.size = 1024; |
| descriptor.usage = wgpu::BufferUsage::Uniform; |
| mUBO = device.CreateBuffer(&descriptor); |
| } |
| { |
| wgpu::BufferDescriptor descriptor; |
| descriptor.size = 1024; |
| descriptor.usage = wgpu::BufferUsage::Storage; |
| mSSBO = device.CreateBuffer(&descriptor); |
| } |
| { mSampler = device.CreateSampler(); } |
| { |
| mSampledTexture = |
| CreateTexture(wgpu::TextureUsage::TextureBinding, kDefaultTextureFormat, 1); |
| mSampledTextureView = mSampledTexture.CreateView(); |
| |
| wgpu::ExternalTextureDescriptor externalTextureDesc; |
| externalTextureDesc.format = kDefaultTextureFormat; |
| externalTextureDesc.plane0 = mSampledTextureView; |
| mExternalTexture = device.CreateExternalTexture(&externalTextureDesc); |
| mExternalTextureBindingEntry.externalTexture = mExternalTexture; |
| } |
| } |
| |
| protected: |
| wgpu::Buffer mUBO; |
| wgpu::Buffer mSSBO; |
| wgpu::Sampler mSampler; |
| wgpu::Texture mSampledTexture; |
| wgpu::TextureView mSampledTextureView; |
| wgpu::ExternalTextureBindingEntry mExternalTextureBindingEntry; |
| |
| static constexpr wgpu::TextureFormat kDefaultTextureFormat = wgpu::TextureFormat::RGBA8Unorm; |
| |
| private: |
| wgpu::ExternalTexture mExternalTexture; |
| }; |
| |
| // Test the validation of BindGroupDescriptor::nextInChain |
| TEST_F(BindGroupValidationTest, NextInChainNullptr) { |
| wgpu::BindGroupLayout layout = utils::MakeBindGroupLayout(device, {}); |
| |
| wgpu::BindGroupDescriptor descriptor; |
| descriptor.layout = layout; |
| descriptor.entryCount = 0; |
| descriptor.entries = nullptr; |
| |
| // Control case: check that nextInChain = nullptr is valid |
| descriptor.nextInChain = nullptr; |
| device.CreateBindGroup(&descriptor); |
| |
| // Check that nextInChain != nullptr is an error. |
| wgpu::ChainedStruct chainedDescriptor; |
| chainedDescriptor.sType = wgpu::SType::Invalid; |
| descriptor.nextInChain = &chainedDescriptor; |
| ASSERT_DEVICE_ERROR(device.CreateBindGroup(&descriptor)); |
| } |
| |
| // Check constraints on entryCount |
| TEST_F(BindGroupValidationTest, EntryCountMismatch) { |
| wgpu::BindGroupLayout layout = utils::MakeBindGroupLayout( |
| device, {{0, wgpu::ShaderStage::Fragment, wgpu::SamplerBindingType::Filtering}}); |
| |
| // Control case: check that a descriptor with one binding is ok |
| utils::MakeBindGroup(device, layout, {{0, mSampler}}); |
| |
| // Check that entryCount != layout.entryCount fails. |
| ASSERT_DEVICE_ERROR(utils::MakeBindGroup(device, layout, {})); |
| } |
| |
| // Check constraints on BindGroupEntry::binding |
| TEST_F(BindGroupValidationTest, WrongBindings) { |
| wgpu::BindGroupLayout layout = utils::MakeBindGroupLayout( |
| device, {{0, wgpu::ShaderStage::Fragment, wgpu::SamplerBindingType::Filtering}}); |
| |
| // Control case: check that a descriptor with a binding matching the layout's is ok |
| utils::MakeBindGroup(device, layout, {{0, mSampler}}); |
| |
| // Check that binding must be present in the layout |
| ASSERT_DEVICE_ERROR(utils::MakeBindGroup(device, layout, {{1, mSampler}})); |
| } |
| |
| // Check that the same binding cannot be set twice |
| TEST_F(BindGroupValidationTest, BindingSetTwice) { |
| wgpu::BindGroupLayout layout = utils::MakeBindGroupLayout( |
| device, {{0, wgpu::ShaderStage::Fragment, wgpu::SamplerBindingType::Filtering}, |
| {1, wgpu::ShaderStage::Fragment, wgpu::SamplerBindingType::Filtering}}); |
| |
| // Control case: check that different bindings work |
| utils::MakeBindGroup(device, layout, {{0, mSampler}, {1, mSampler}}); |
| |
| // Check that setting the same binding twice is invalid |
| ASSERT_DEVICE_ERROR(utils::MakeBindGroup(device, layout, {{0, mSampler}, {0, mSampler}})); |
| } |
| |
| // Check that a sampler binding must contain exactly one sampler |
| TEST_F(BindGroupValidationTest, SamplerBindingType) { |
| wgpu::BindGroupLayout layout = utils::MakeBindGroupLayout( |
| device, {{0, wgpu::ShaderStage::Fragment, wgpu::SamplerBindingType::Filtering}}); |
| |
| wgpu::BindGroupEntry binding; |
| binding.binding = 0; |
| binding.sampler = nullptr; |
| binding.textureView = nullptr; |
| binding.buffer = nullptr; |
| binding.offset = 0; |
| binding.size = 0; |
| |
| wgpu::BindGroupDescriptor descriptor; |
| descriptor.layout = layout; |
| descriptor.entryCount = 1; |
| descriptor.entries = &binding; |
| |
| // Not setting anything fails |
| ASSERT_DEVICE_ERROR(device.CreateBindGroup(&descriptor)); |
| |
| // Control case: setting just the sampler works |
| binding.sampler = mSampler; |
| device.CreateBindGroup(&descriptor); |
| |
| // Setting the texture view as well is an error |
| binding.textureView = mSampledTextureView; |
| ASSERT_DEVICE_ERROR(device.CreateBindGroup(&descriptor)); |
| binding.textureView = nullptr; |
| |
| // Setting the buffer as well is an error |
| binding.buffer = mUBO; |
| ASSERT_DEVICE_ERROR(device.CreateBindGroup(&descriptor)); |
| binding.buffer = nullptr; |
| |
| // Chaining an external texture binding entry as well is an error |
| binding.nextInChain = &mExternalTextureBindingEntry; |
| ASSERT_DEVICE_ERROR(device.CreateBindGroup(&descriptor)); |
| binding.nextInChain = nullptr; |
| |
| // Setting the sampler to an error sampler is an error. |
| { |
| wgpu::SamplerDescriptor samplerDesc; |
| samplerDesc.minFilter = static_cast<wgpu::FilterMode>(0xFFFFFFFF); |
| |
| wgpu::Sampler errorSampler; |
| ASSERT_DEVICE_ERROR(errorSampler = device.CreateSampler(&samplerDesc)); |
| |
| binding.sampler = errorSampler; |
| ASSERT_DEVICE_ERROR(device.CreateBindGroup(&descriptor)); |
| binding.sampler = nullptr; |
| } |
| } |
| |
| // Check that a texture binding must contain exactly one texture view |
| TEST_F(BindGroupValidationTest, TextureBindingType) { |
| wgpu::BindGroupLayout layout = utils::MakeBindGroupLayout( |
| device, {{0, wgpu::ShaderStage::Fragment, wgpu::TextureSampleType::Float}}); |
| |
| wgpu::BindGroupEntry binding; |
| binding.binding = 0; |
| binding.sampler = nullptr; |
| binding.textureView = nullptr; |
| binding.buffer = nullptr; |
| binding.offset = 0; |
| binding.size = 0; |
| |
| wgpu::BindGroupDescriptor descriptor; |
| descriptor.layout = layout; |
| descriptor.entryCount = 1; |
| descriptor.entries = &binding; |
| |
| // Not setting anything fails |
| ASSERT_DEVICE_ERROR(device.CreateBindGroup(&descriptor)); |
| |
| // Control case: setting just the texture view works |
| binding.textureView = mSampledTextureView; |
| device.CreateBindGroup(&descriptor); |
| |
| // Setting the sampler as well is an error |
| binding.sampler = mSampler; |
| ASSERT_DEVICE_ERROR(device.CreateBindGroup(&descriptor)); |
| binding.sampler = nullptr; |
| |
| // Setting the buffer as well is an error |
| binding.buffer = mUBO; |
| ASSERT_DEVICE_ERROR(device.CreateBindGroup(&descriptor)); |
| binding.buffer = nullptr; |
| |
| // Chaining an external texture binding entry as well is an error |
| binding.nextInChain = &mExternalTextureBindingEntry; |
| ASSERT_DEVICE_ERROR(device.CreateBindGroup(&descriptor)); |
| binding.nextInChain = nullptr; |
| |
| // Setting the texture view to an error texture view is an error. |
| { |
| wgpu::TextureViewDescriptor viewDesc; |
| viewDesc.format = kDefaultTextureFormat; |
| viewDesc.dimension = wgpu::TextureViewDimension::e2D; |
| viewDesc.baseMipLevel = 0; |
| viewDesc.mipLevelCount = 0; |
| viewDesc.baseArrayLayer = 0; |
| viewDesc.arrayLayerCount = 1000; |
| |
| wgpu::TextureView errorView; |
| ASSERT_DEVICE_ERROR(errorView = mSampledTexture.CreateView(&viewDesc)); |
| |
| binding.textureView = errorView; |
| ASSERT_DEVICE_ERROR(device.CreateBindGroup(&descriptor)); |
| binding.textureView = nullptr; |
| } |
| } |
| |
| // Check that a buffer binding must contain exactly one buffer |
| TEST_F(BindGroupValidationTest, BufferBindingType) { |
| wgpu::BindGroupLayout layout = utils::MakeBindGroupLayout( |
| device, {{0, wgpu::ShaderStage::Fragment, wgpu::BufferBindingType::Uniform}}); |
| |
| wgpu::BindGroupEntry binding; |
| binding.binding = 0; |
| binding.sampler = nullptr; |
| binding.textureView = nullptr; |
| binding.buffer = nullptr; |
| binding.offset = 0; |
| binding.size = 1024; |
| |
| wgpu::BindGroupDescriptor descriptor; |
| descriptor.layout = layout; |
| descriptor.entryCount = 1; |
| descriptor.entries = &binding; |
| |
| // Not setting anything fails |
| ASSERT_DEVICE_ERROR(device.CreateBindGroup(&descriptor)); |
| |
| // Control case: setting just the buffer works |
| binding.buffer = mUBO; |
| device.CreateBindGroup(&descriptor); |
| |
| // Setting the texture view as well is an error |
| binding.textureView = mSampledTextureView; |
| ASSERT_DEVICE_ERROR(device.CreateBindGroup(&descriptor)); |
| binding.textureView = nullptr; |
| |
| // Setting the sampler as well is an error |
| binding.sampler = mSampler; |
| ASSERT_DEVICE_ERROR(device.CreateBindGroup(&descriptor)); |
| binding.sampler = nullptr; |
| |
| // Chaining an external texture binding entry as well is an error |
| binding.nextInChain = &mExternalTextureBindingEntry; |
| ASSERT_DEVICE_ERROR(device.CreateBindGroup(&descriptor)); |
| binding.nextInChain = nullptr; |
| |
| // Setting the buffer to an error buffer is an error. |
| { |
| wgpu::BufferDescriptor bufferDesc; |
| bufferDesc.size = 1024; |
| bufferDesc.usage = static_cast<wgpu::BufferUsage>(0xFFFFFFFF); |
| |
| wgpu::Buffer errorBuffer; |
| ASSERT_DEVICE_ERROR(errorBuffer = device.CreateBuffer(&bufferDesc)); |
| |
| binding.buffer = errorBuffer; |
| ASSERT_DEVICE_ERROR(device.CreateBindGroup(&descriptor)); |
| binding.buffer = nullptr; |
| } |
| } |
| |
| // Check that an external texture binding must contain exactly one external texture |
| TEST_F(BindGroupValidationTest, ExternalTextureBindingType) { |
| // Create an external texture |
| wgpu::Texture texture = |
| CreateTexture(wgpu::TextureUsage::TextureBinding, kDefaultTextureFormat, 1); |
| wgpu::ExternalTextureDescriptor externalDesc; |
| externalDesc.plane0 = texture.CreateView(); |
| externalDesc.format = kDefaultTextureFormat; |
| wgpu::ExternalTexture externalTexture = device.CreateExternalTexture(&externalDesc); |
| |
| // Create a bind group layout for a single external texture |
| wgpu::BindGroupLayout layout = utils::MakeBindGroupLayout( |
| device, {{0, wgpu::ShaderStage::Fragment, &utils::kExternalTextureBindingLayout}}); |
| |
| wgpu::BindGroupEntry binding; |
| binding.binding = 0; |
| binding.sampler = nullptr; |
| binding.textureView = nullptr; |
| binding.buffer = nullptr; |
| binding.offset = 0; |
| binding.size = 0; |
| |
| wgpu::BindGroupDescriptor descriptor; |
| descriptor.layout = layout; |
| descriptor.entryCount = 1; |
| descriptor.entries = &binding; |
| |
| // Not setting anything fails |
| ASSERT_DEVICE_ERROR(device.CreateBindGroup(&descriptor)); |
| |
| // Control case: setting just the external texture works |
| wgpu::ExternalTextureBindingEntry externalBindingEntry; |
| externalBindingEntry.externalTexture = externalTexture; |
| binding.nextInChain = &externalBindingEntry; |
| device.CreateBindGroup(&descriptor); |
| |
| // Setting the texture view as well is an error |
| binding.textureView = mSampledTextureView; |
| ASSERT_DEVICE_ERROR(device.CreateBindGroup(&descriptor)); |
| binding.textureView = nullptr; |
| |
| // Setting the sampler as well is an error |
| binding.sampler = mSampler; |
| ASSERT_DEVICE_ERROR(device.CreateBindGroup(&descriptor)); |
| binding.sampler = nullptr; |
| |
| // Setting the buffer as well is an error |
| binding.buffer = mUBO; |
| ASSERT_DEVICE_ERROR(device.CreateBindGroup(&descriptor)); |
| binding.buffer = nullptr; |
| |
| // Setting the external texture to an error external texture is an error. |
| { |
| wgpu::ExternalTextureDescriptor errorExternalDescriptor; |
| errorExternalDescriptor.plane0 = texture.CreateView(); |
| errorExternalDescriptor.format = wgpu::TextureFormat::R8Uint; |
| |
| wgpu::ExternalTexture errorExternalTexture; |
| ASSERT_DEVICE_ERROR(errorExternalTexture = |
| device.CreateExternalTexture(&errorExternalDescriptor)); |
| |
| wgpu::ExternalTextureBindingEntry errorExternalBindingEntry; |
| errorExternalBindingEntry.externalTexture = errorExternalTexture; |
| binding.nextInChain = &errorExternalBindingEntry; |
| ASSERT_DEVICE_ERROR(device.CreateBindGroup(&descriptor)); |
| binding.nextInChain = nullptr; |
| } |
| |
| // Setting an external texture with another external texture chained is an error. |
| { |
| wgpu::ExternalTexture externalTexture2 = device.CreateExternalTexture(&externalDesc); |
| wgpu::ExternalTextureBindingEntry externalBindingEntry2; |
| externalBindingEntry2.externalTexture = externalTexture2; |
| externalBindingEntry.nextInChain = &externalBindingEntry2; |
| |
| ASSERT_DEVICE_ERROR(device.CreateBindGroup(&descriptor)); |
| } |
| |
| // Chaining a struct that isn't an external texture binding entry is an error. |
| { |
| wgpu::ExternalTextureBindingLayout externalBindingLayout; |
| binding.nextInChain = &externalBindingLayout; |
| ASSERT_DEVICE_ERROR(device.CreateBindGroup(&descriptor)); |
| } |
| } |
| |
| // Check that a texture binding must have the correct usage |
| TEST_F(BindGroupValidationTest, TextureUsage) { |
| wgpu::BindGroupLayout layout = utils::MakeBindGroupLayout( |
| device, {{0, wgpu::ShaderStage::Fragment, wgpu::TextureSampleType::Float}}); |
| |
| // Control case: setting a sampleable texture view works. |
| utils::MakeBindGroup(device, layout, {{0, mSampledTextureView}}); |
| |
| // Make a render attachment texture and try to set it for a SampledTexture binding |
| wgpu::Texture outputTexture = |
| CreateTexture(wgpu::TextureUsage::RenderAttachment, wgpu::TextureFormat::RGBA8Unorm, 1); |
| wgpu::TextureView outputTextureView = outputTexture.CreateView(); |
| ASSERT_DEVICE_ERROR(utils::MakeBindGroup(device, layout, {{0, outputTextureView}})); |
| } |
| |
| // Check that a storage texture binding must have the correct usage |
| TEST_F(BindGroupValidationTest, StorageTextureUsage) { |
| wgpu::BindGroupLayout layout = utils::MakeBindGroupLayout( |
| device, {{0, wgpu::ShaderStage::Compute, wgpu::StorageTextureAccess::WriteOnly, |
| wgpu::TextureFormat::RGBA8Uint}}); |
| |
| wgpu::TextureDescriptor descriptor; |
| descriptor.dimension = wgpu::TextureDimension::e2D; |
| descriptor.size = {16, 16, 1}; |
| descriptor.sampleCount = 1; |
| descriptor.mipLevelCount = 1; |
| descriptor.usage = wgpu::TextureUsage::StorageBinding; |
| descriptor.format = wgpu::TextureFormat::RGBA8Uint; |
| |
| wgpu::TextureView view = device.CreateTexture(&descriptor).CreateView(); |
| |
| // Control case: setting a storage texture view works. |
| utils::MakeBindGroup(device, layout, {{0, view}}); |
| |
| // Sampled texture is invalid with storage texture binding |
| descriptor.usage = wgpu::TextureUsage::TextureBinding; |
| descriptor.format = wgpu::TextureFormat::RGBA8Unorm; |
| view = device.CreateTexture(&descriptor).CreateView(); |
| ASSERT_DEVICE_ERROR(utils::MakeBindGroup(device, layout, {{0, view}})); |
| |
| // Multisampled texture is invalid with storage texture binding |
| // Regression case for crbug.com/dawn/614 where this hit an ASSERT. |
| descriptor.sampleCount = 4; |
| view = device.CreateTexture(&descriptor).CreateView(); |
| ASSERT_DEVICE_ERROR(utils::MakeBindGroup(device, layout, {{0, view}})); |
| } |
| |
| // Check that a texture must have the correct sample type |
| TEST_F(BindGroupValidationTest, TextureSampleType) { |
| auto DoTest = [this](bool success, wgpu::TextureFormat format, |
| wgpu::TextureSampleType sampleType) { |
| wgpu::BindGroupLayout layout = |
| utils::MakeBindGroupLayout(device, {{0, wgpu::ShaderStage::Fragment, sampleType}}); |
| |
| wgpu::TextureDescriptor descriptor; |
| descriptor.size = {4, 4, 1}; |
| descriptor.usage = wgpu::TextureUsage::TextureBinding; |
| descriptor.format = format; |
| |
| wgpu::TextureView view = device.CreateTexture(&descriptor).CreateView(); |
| |
| if (success) { |
| utils::MakeBindGroup(device, layout, {{0, view}}); |
| } else { |
| ASSERT_DEVICE_ERROR(utils::MakeBindGroup(device, layout, {{0, view}})); |
| } |
| }; |
| |
| // Test that RGBA8Unorm is only compatible with float/unfilterable-float |
| DoTest(true, wgpu::TextureFormat::RGBA8Unorm, wgpu::TextureSampleType::Float); |
| DoTest(true, wgpu::TextureFormat::RGBA8Unorm, wgpu::TextureSampleType::UnfilterableFloat); |
| DoTest(false, wgpu::TextureFormat::RGBA8Unorm, wgpu::TextureSampleType::Depth); |
| DoTest(false, wgpu::TextureFormat::RGBA8Unorm, wgpu::TextureSampleType::Uint); |
| DoTest(false, wgpu::TextureFormat::RGBA8Unorm, wgpu::TextureSampleType::Sint); |
| |
| // Test that R32Float is only compatible with unfilterable-float |
| DoTest(false, wgpu::TextureFormat::R32Float, wgpu::TextureSampleType::Float); |
| DoTest(true, wgpu::TextureFormat::R32Float, wgpu::TextureSampleType::UnfilterableFloat); |
| DoTest(false, wgpu::TextureFormat::R32Float, wgpu::TextureSampleType::Depth); |
| DoTest(false, wgpu::TextureFormat::R32Float, wgpu::TextureSampleType::Uint); |
| DoTest(false, wgpu::TextureFormat::R32Float, wgpu::TextureSampleType::Sint); |
| |
| // Test that Depth32Float is only compatible with depth. |
| DoTest(false, wgpu::TextureFormat::Depth32Float, wgpu::TextureSampleType::Float); |
| DoTest(false, wgpu::TextureFormat::Depth32Float, wgpu::TextureSampleType::UnfilterableFloat); |
| DoTest(true, wgpu::TextureFormat::Depth32Float, wgpu::TextureSampleType::Depth); |
| DoTest(false, wgpu::TextureFormat::Depth32Float, wgpu::TextureSampleType::Uint); |
| DoTest(false, wgpu::TextureFormat::Depth32Float, wgpu::TextureSampleType::Sint); |
| |
| // Test that RG8Uint is only compatible with uint |
| DoTest(false, wgpu::TextureFormat::RG8Uint, wgpu::TextureSampleType::Float); |
| DoTest(false, wgpu::TextureFormat::RG8Uint, wgpu::TextureSampleType::UnfilterableFloat); |
| DoTest(false, wgpu::TextureFormat::RG8Uint, wgpu::TextureSampleType::Depth); |
| DoTest(true, wgpu::TextureFormat::RG8Uint, wgpu::TextureSampleType::Uint); |
| DoTest(false, wgpu::TextureFormat::RG8Uint, wgpu::TextureSampleType::Sint); |
| |
| // Test that R16Sint is only compatible with sint |
| DoTest(false, wgpu::TextureFormat::R16Sint, wgpu::TextureSampleType::Float); |
| DoTest(false, wgpu::TextureFormat::R16Sint, wgpu::TextureSampleType::UnfilterableFloat); |
| DoTest(false, wgpu::TextureFormat::R16Sint, wgpu::TextureSampleType::Depth); |
| DoTest(false, wgpu::TextureFormat::R16Sint, wgpu::TextureSampleType::Uint); |
| DoTest(true, wgpu::TextureFormat::R16Sint, wgpu::TextureSampleType::Sint); |
| } |
| |
| // Test which depth-stencil formats are allowed to be sampled (all). |
| TEST_F(BindGroupValidationTest, SamplingDepthStencilTexture) { |
| wgpu::BindGroupLayout layout = utils::MakeBindGroupLayout( |
| device, {{0, wgpu::ShaderStage::Fragment, wgpu::TextureSampleType::Depth}}); |
| |
| wgpu::TextureDescriptor desc; |
| desc.size = {1, 1, 1}; |
| desc.usage = wgpu::TextureUsage::TextureBinding; |
| |
| // Depth32Float is allowed to be sampled. |
| { |
| desc.format = wgpu::TextureFormat::Depth32Float; |
| wgpu::Texture texture = device.CreateTexture(&desc); |
| |
| utils::MakeBindGroup(device, layout, {{0, texture.CreateView()}}); |
| } |
| |
| // Depth24Plus is allowed to be sampled. |
| { |
| desc.format = wgpu::TextureFormat::Depth24Plus; |
| wgpu::Texture texture = device.CreateTexture(&desc); |
| |
| utils::MakeBindGroup(device, layout, {{0, texture.CreateView()}}); |
| } |
| |
| // Depth24PlusStencil8 is allowed to be sampled, if the depth or stencil aspect is selected. |
| { |
| desc.format = wgpu::TextureFormat::Depth24PlusStencil8; |
| wgpu::Texture texture = device.CreateTexture(&desc); |
| wgpu::TextureViewDescriptor viewDesc = {}; |
| |
| viewDesc.aspect = wgpu::TextureAspect::DepthOnly; |
| utils::MakeBindGroup(device, layout, {{0, texture.CreateView(&viewDesc)}}); |
| |
| layout = utils::MakeBindGroupLayout( |
| device, {{0, wgpu::ShaderStage::Fragment, wgpu::TextureSampleType::Uint}}); |
| |
| viewDesc.aspect = wgpu::TextureAspect::StencilOnly; |
| utils::MakeBindGroup(device, layout, {{0, texture.CreateView(&viewDesc)}}); |
| } |
| } |
| |
| // Check that a texture must have the correct dimension |
| TEST_F(BindGroupValidationTest, TextureDimension) { |
| wgpu::BindGroupLayout layout = utils::MakeBindGroupLayout( |
| device, {{0, wgpu::ShaderStage::Fragment, wgpu::TextureSampleType::Float}}); |
| |
| // Control case: setting a 2D texture view works. |
| utils::MakeBindGroup(device, layout, {{0, mSampledTextureView}}); |
| |
| // Make a 2DArray texture and try to set it to a 2D binding. |
| wgpu::Texture arrayTexture = |
| CreateTexture(wgpu::TextureUsage::TextureBinding, wgpu::TextureFormat::RGBA8Uint, 2); |
| wgpu::TextureView arrayTextureView = arrayTexture.CreateView(); |
| |
| ASSERT_DEVICE_ERROR(utils::MakeBindGroup(device, layout, {{0, arrayTextureView}})); |
| } |
| |
| // Check that a UBO must have the correct usage |
| TEST_F(BindGroupValidationTest, BufferUsageUBO) { |
| wgpu::BindGroupLayout layout = utils::MakeBindGroupLayout( |
| device, {{0, wgpu::ShaderStage::Fragment, wgpu::BufferBindingType::Uniform}}); |
| |
| // Control case: using a buffer with the uniform usage works |
| utils::MakeBindGroup(device, layout, {{0, mUBO, 0, 256}}); |
| |
| // Using a buffer without the uniform usage fails |
| ASSERT_DEVICE_ERROR(utils::MakeBindGroup(device, layout, {{0, mSSBO, 0, 256}})); |
| } |
| |
| // Check that a SSBO must have the correct usage |
| TEST_F(BindGroupValidationTest, BufferUsageSSBO) { |
| wgpu::BindGroupLayout layout = utils::MakeBindGroupLayout( |
| device, {{0, wgpu::ShaderStage::Fragment, wgpu::BufferBindingType::Storage}}); |
| |
| // Control case: using a buffer with the storage usage works |
| utils::MakeBindGroup(device, layout, {{0, mSSBO, 0, 256}}); |
| |
| // Using a buffer without the storage usage fails |
| ASSERT_DEVICE_ERROR(utils::MakeBindGroup(device, layout, {{0, mUBO, 0, 256}})); |
| } |
| |
| // Check that a readonly SSBO must have the correct usage |
| TEST_F(BindGroupValidationTest, BufferUsageReadonlySSBO) { |
| wgpu::BindGroupLayout layout = utils::MakeBindGroupLayout( |
| device, {{0, wgpu::ShaderStage::Fragment, wgpu::BufferBindingType::ReadOnlyStorage}}); |
| |
| // Control case: using a buffer with the storage usage works |
| utils::MakeBindGroup(device, layout, {{0, mSSBO, 0, 256}}); |
| |
| // Using a buffer without the storage usage fails |
| ASSERT_DEVICE_ERROR(utils::MakeBindGroup(device, layout, {{0, mUBO, 0, 256}})); |
| } |
| |
| // Check that a resolve buffer with internal storage usage cannot be used as an SSBO |
| TEST_F(BindGroupValidationTest, BufferUsageQueryResolve) { |
| wgpu::BindGroupLayout layout = utils::MakeBindGroupLayout( |
| device, {{0, wgpu::ShaderStage::Fragment, wgpu::BufferBindingType::Storage}}); |
| |
| // Control case: using a buffer with the storage usage works |
| utils::MakeBindGroup(device, layout, {{0, mSSBO, 0, 256}}); |
| |
| // Using a resolve buffer with the internal storage usage fails |
| wgpu::BufferDescriptor descriptor; |
| descriptor.size = 1024; |
| descriptor.usage = wgpu::BufferUsage::QueryResolve; |
| wgpu::Buffer buffer = device.CreateBuffer(&descriptor); |
| ASSERT_DEVICE_ERROR(utils::MakeBindGroup(device, layout, {{0, buffer, 0, 256}})); |
| } |
| |
| // Tests constraints on the buffer offset for bind groups. |
| TEST_F(BindGroupValidationTest, BufferOffsetAlignment) { |
| wgpu::BindGroupLayout layout = utils::MakeBindGroupLayout( |
| device, { |
| {0, wgpu::ShaderStage::Vertex, wgpu::BufferBindingType::Uniform}, |
| }); |
| |
| // Check that offset 0 is valid |
| utils::MakeBindGroup(device, layout, {{0, mUBO, 0, 512}}); |
| |
| // Check that offset 256 (aligned) is valid |
| utils::MakeBindGroup(device, layout, {{0, mUBO, 256, 256}}); |
| |
| // Check cases where unaligned buffer offset is invalid |
| ASSERT_DEVICE_ERROR(utils::MakeBindGroup(device, layout, {{0, mUBO, 1, 256}})); |
| ASSERT_DEVICE_ERROR(utils::MakeBindGroup(device, layout, {{0, mUBO, 128, 256}})); |
| ASSERT_DEVICE_ERROR(utils::MakeBindGroup(device, layout, {{0, mUBO, 255, 256}})); |
| } |
| |
| // Tests constraints on the texture for MultisampledTexture bindings |
| TEST_F(BindGroupValidationTest, MultisampledTexture) { |
| wgpu::BindGroupLayout layout = utils::MakeBindGroupLayout( |
| device, {{0, wgpu::ShaderStage::Fragment, wgpu::TextureSampleType::Float, |
| wgpu::TextureViewDimension::e2D, true}}); |
| |
| wgpu::BindGroupEntry binding; |
| binding.binding = 0; |
| binding.sampler = nullptr; |
| binding.textureView = nullptr; |
| binding.buffer = nullptr; |
| binding.offset = 0; |
| binding.size = 0; |
| |
| wgpu::BindGroupDescriptor descriptor; |
| descriptor.layout = layout; |
| descriptor.entryCount = 1; |
| descriptor.entries = &binding; |
| |
| // Not setting anything fails |
| ASSERT_DEVICE_ERROR(device.CreateBindGroup(&descriptor)); |
| |
| // Control case: setting a multisampled 2D texture works |
| wgpu::TextureDescriptor textureDesc; |
| textureDesc.sampleCount = 4; |
| textureDesc.usage = wgpu::TextureUsage::TextureBinding; |
| textureDesc.dimension = wgpu::TextureDimension::e2D; |
| textureDesc.format = wgpu::TextureFormat::RGBA8Unorm; |
| textureDesc.size = {1, 1, 1}; |
| wgpu::Texture msTexture = device.CreateTexture(&textureDesc); |
| |
| binding.textureView = msTexture.CreateView(); |
| device.CreateBindGroup(&descriptor); |
| binding.textureView = nullptr; |
| |
| // Error case: setting a single sampled 2D texture is an error. |
| binding.textureView = mSampledTextureView; |
| ASSERT_DEVICE_ERROR(device.CreateBindGroup(&descriptor)); |
| binding.textureView = nullptr; |
| } |
| |
| // Tests constraints to be sure the buffer binding fits in the buffer |
| TEST_F(BindGroupValidationTest, BufferBindingOOB) { |
| wgpu::BindGroupLayout layout = utils::MakeBindGroupLayout( |
| device, { |
| {0, wgpu::ShaderStage::Vertex, wgpu::BufferBindingType::Uniform}, |
| }); |
| |
| wgpu::BufferDescriptor descriptor; |
| descriptor.size = 1024; |
| descriptor.usage = wgpu::BufferUsage::Uniform; |
| wgpu::Buffer buffer = device.CreateBuffer(&descriptor); |
| |
| // Success case, touching the start of the buffer works |
| utils::MakeBindGroup(device, layout, {{0, buffer, 0, 256}}); |
| |
| // Success case, touching the end of the buffer works |
| utils::MakeBindGroup(device, layout, {{0, buffer, 3 * 256, 256}}); |
| |
| // Error case, zero size is invalid. |
| ASSERT_DEVICE_ERROR(utils::MakeBindGroup(device, layout, {{0, buffer, 1024, 0}})); |
| |
| // Success case, touching the full buffer works |
| utils::MakeBindGroup(device, layout, {{0, buffer, 0, 1024}}); |
| utils::MakeBindGroup(device, layout, {{0, buffer, 0, wgpu::kWholeSize}}); |
| |
| // Success case, whole size causes the rest of the buffer to be used but not beyond. |
| utils::MakeBindGroup(device, layout, {{0, buffer, 256, wgpu::kWholeSize}}); |
| |
| // Error case, offset is OOB |
| ASSERT_DEVICE_ERROR(utils::MakeBindGroup(device, layout, {{0, buffer, 256 * 5, 0}})); |
| |
| // Error case, size is OOB |
| ASSERT_DEVICE_ERROR(utils::MakeBindGroup(device, layout, {{0, buffer, 0, 256 * 5}})); |
| |
| // Error case, offset+size is OOB |
| ASSERT_DEVICE_ERROR(utils::MakeBindGroup(device, layout, {{0, buffer, 1024, 256}})); |
| |
| // Error case, offset+size overflows to be 0 |
| ASSERT_DEVICE_ERROR( |
| utils::MakeBindGroup(device, layout, {{0, buffer, 256, uint64_t(0) - uint64_t(256)}})); |
| } |
| |
| // Tests constraints to be sure the uniform buffer binding isn't too large |
| TEST_F(BindGroupValidationTest, MaxUniformBufferBindingSize) { |
| wgpu::BufferDescriptor descriptor; |
| descriptor.size = 2 * kMaxUniformBufferBindingSize; |
| descriptor.usage = wgpu::BufferUsage::Uniform | wgpu::BufferUsage::Storage; |
| wgpu::Buffer buffer = device.CreateBuffer(&descriptor); |
| |
| wgpu::BindGroupLayout uniformLayout = utils::MakeBindGroupLayout( |
| device, {{0, wgpu::ShaderStage::Vertex, wgpu::BufferBindingType::Uniform}}); |
| |
| // Success case, this is exactly the limit |
| utils::MakeBindGroup(device, uniformLayout, {{0, buffer, 0, kMaxUniformBufferBindingSize}}); |
| |
| wgpu::BindGroupLayout doubleUniformLayout = utils::MakeBindGroupLayout( |
| device, {{0, wgpu::ShaderStage::Vertex, wgpu::BufferBindingType::Uniform}, |
| {1, wgpu::ShaderStage::Vertex, wgpu::BufferBindingType::Uniform}}); |
| |
| // Success case, individual bindings don't exceed the limit |
| utils::MakeBindGroup(device, doubleUniformLayout, |
| {{0, buffer, 0, kMaxUniformBufferBindingSize}, |
| {1, buffer, kMaxUniformBufferBindingSize, kMaxUniformBufferBindingSize}}); |
| |
| // Error case, this is above the limit |
| ASSERT_DEVICE_ERROR(utils::MakeBindGroup(device, uniformLayout, |
| {{0, buffer, 0, kMaxUniformBufferBindingSize + 1}})); |
| |
| // Making sure the constraint doesn't apply to storage buffers |
| wgpu::BindGroupLayout readonlyStorageLayout = utils::MakeBindGroupLayout( |
| device, {{0, wgpu::ShaderStage::Fragment, wgpu::BufferBindingType::ReadOnlyStorage}}); |
| wgpu::BindGroupLayout storageLayout = utils::MakeBindGroupLayout( |
| device, {{0, wgpu::ShaderStage::Fragment, wgpu::BufferBindingType::Storage}}); |
| |
| // Success case, storage buffer can still be created. |
| utils::MakeBindGroup(device, readonlyStorageLayout, |
| {{0, buffer, 0, 2 * kMaxUniformBufferBindingSize}}); |
| utils::MakeBindGroup(device, storageLayout, {{0, buffer, 0, 2 * kMaxUniformBufferBindingSize}}); |
| } |
| |
| // Tests constraints to be sure the storage buffer binding isn't too large |
| TEST_F(BindGroupValidationTest, MaxStorageBufferBindingSize) { |
| wgpu::BufferDescriptor descriptor; |
| descriptor.size = 2 * kMaxStorageBufferBindingSize; |
| descriptor.usage = wgpu::BufferUsage::Storage; |
| wgpu::Buffer buffer = device.CreateBuffer(&descriptor); |
| |
| wgpu::BindGroupLayout uniformLayout = utils::MakeBindGroupLayout( |
| device, {{0, wgpu::ShaderStage::Fragment, wgpu::BufferBindingType::Storage}}); |
| |
| // Success case, this is exactly the limit |
| utils::MakeBindGroup(device, uniformLayout, {{0, buffer, 0, kMaxStorageBufferBindingSize}}); |
| |
| // Success case, this is one less than the limit (check it is not an alignment constraint) |
| utils::MakeBindGroup(device, uniformLayout, {{0, buffer, 0, kMaxStorageBufferBindingSize - 1}}); |
| |
| wgpu::BindGroupLayout doubleUniformLayout = utils::MakeBindGroupLayout( |
| device, {{0, wgpu::ShaderStage::Fragment, wgpu::BufferBindingType::Storage}, |
| {1, wgpu::ShaderStage::Fragment, wgpu::BufferBindingType::Storage}}); |
| |
| // Success case, individual bindings don't exceed the limit |
| utils::MakeBindGroup(device, doubleUniformLayout, |
| {{0, buffer, 0, kMaxStorageBufferBindingSize}, |
| {1, buffer, kMaxStorageBufferBindingSize, kMaxStorageBufferBindingSize}}); |
| |
| // Error case, this is above the limit |
| ASSERT_DEVICE_ERROR(utils::MakeBindGroup(device, uniformLayout, |
| {{0, buffer, 0, kMaxStorageBufferBindingSize + 1}})); |
| } |
| |
| // Test what happens when the layout is an error. |
| TEST_F(BindGroupValidationTest, ErrorLayout) { |
| wgpu::BindGroupLayout goodLayout = utils::MakeBindGroupLayout( |
| device, { |
| {0, wgpu::ShaderStage::Vertex, wgpu::BufferBindingType::Uniform}, |
| }); |
| |
| wgpu::BindGroupLayout errorLayout; |
| ASSERT_DEVICE_ERROR( |
| errorLayout = utils::MakeBindGroupLayout( |
| device, { |
| {0, wgpu::ShaderStage::Vertex, wgpu::BufferBindingType::Uniform}, |
| {0, wgpu::ShaderStage::Vertex, wgpu::BufferBindingType::Uniform}, |
| })); |
| |
| // Control case, creating with the good layout works |
| utils::MakeBindGroup(device, goodLayout, {{0, mUBO, 0, 256}}); |
| |
| // Creating with an error layout fails |
| ASSERT_DEVICE_ERROR(utils::MakeBindGroup(device, errorLayout, {{0, mUBO, 0, 256}})); |
| } |
| |
| class BindGroupLayoutValidationTest : public ValidationTest { |
| public: |
| wgpu::BindGroupLayout MakeBindGroupLayout(wgpu::BindGroupLayoutEntry* binding, uint32_t count) { |
| wgpu::BindGroupLayoutDescriptor descriptor; |
| descriptor.entryCount = count; |
| descriptor.entries = binding; |
| return device.CreateBindGroupLayout(&descriptor); |
| } |
| |
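| // Creates a bind group layout from |count| entries and checks that creation succeeds or |
| // produces a device error according to |expected|. |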
| void TestCreateBindGroupLayout(wgpu::BindGroupLayoutEntry* binding, |
| uint32_t count, |
| bool expected) { |
| wgpu::BindGroupLayoutDescriptor descriptor; |
| |
| descriptor.entryCount = count; |
| descriptor.entries = binding; |
| |
| if (!expected) { |
| ASSERT_DEVICE_ERROR(device.CreateBindGroupLayout(&descriptor)); |
| } else { |
| device.CreateBindGroupLayout(&descriptor); |
| } |
| } |
| |
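| // Creates a pipeline layout from |count| bind group layouts and checks that creation succeeds |
| // or produces a device error according to |expected|. |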
| void TestCreatePipelineLayout(wgpu::BindGroupLayout* bgl, uint32_t count, bool expected) { |
| wgpu::PipelineLayoutDescriptor descriptor; |
| |
| descriptor.bindGroupLayoutCount = count; |
| descriptor.bindGroupLayouts = bgl; |
| |
| if (!expected) { |
| ASSERT_DEVICE_ERROR(device.CreatePipelineLayout(&descriptor)); |
| } else { |
| device.CreatePipelineLayout(&descriptor); |
| } |
| } |
| }; |
| |
| // Tests setting storage buffer and readonly storage buffer bindings in vertex and fragment shader. |
| TEST_F(BindGroupLayoutValidationTest, BindGroupLayoutStorageBindingsInVertexShader) { |
| // Checks that storage buffer binding is not supported in vertex shader. |
| ASSERT_DEVICE_ERROR(utils::MakeBindGroupLayout( |
| device, {{0, wgpu::ShaderStage::Vertex, wgpu::BufferBindingType::Storage}})); |
| |
| utils::MakeBindGroupLayout( |
| device, {{0, wgpu::ShaderStage::Vertex, wgpu::BufferBindingType::ReadOnlyStorage}}); |
| |
| utils::MakeBindGroupLayout( |
| device, {{0, wgpu::ShaderStage::Fragment, wgpu::BufferBindingType::Storage}}); |
| |
| utils::MakeBindGroupLayout( |
| device, {{0, wgpu::ShaderStage::Fragment, wgpu::BufferBindingType::ReadOnlyStorage}}); |
| } |
| |
| // Tests that bind group layout binding numbers may be very large. |
| TEST_F(BindGroupLayoutValidationTest, BindGroupLayoutEntryNumberLarge) { |
| // Checks that uint32_t max is valid. |
| utils::MakeBindGroupLayout(device, |
| {{std::numeric_limits<uint32_t>::max(), wgpu::ShaderStage::Vertex, |
| wgpu::BufferBindingType::Uniform}}); |
| } |
| |
| // This test verifies that the BindGroupLayout bindings are correctly validated, even if the |
| // binding ids are out-of-order. |
| TEST_F(BindGroupLayoutValidationTest, BindGroupEntry) { |
| utils::MakeBindGroupLayout(device, |
| { |
| {1, wgpu::ShaderStage::Vertex, wgpu::BufferBindingType::Uniform}, |
| {0, wgpu::ShaderStage::Vertex, wgpu::BufferBindingType::Uniform}, |
| }); |
| } |
| |
| // Check that dynamic = true is only allowed for buffer bindings. |
| TEST_F(BindGroupLayoutValidationTest, DynamicAndTypeCompatibility) { |
| utils::MakeBindGroupLayout( |
| device, { |
| {0, wgpu::ShaderStage::Compute, wgpu::BufferBindingType::Uniform, true}, |
| }); |
| |
| utils::MakeBindGroupLayout( |
| device, { |
| {0, wgpu::ShaderStage::Compute, wgpu::BufferBindingType::Storage, true}, |
| }); |
| |
| utils::MakeBindGroupLayout( |
| device, { |
| {0, wgpu::ShaderStage::Compute, wgpu::BufferBindingType::ReadOnlyStorage, true}, |
| }); |
| } |
| |
| // This test verifies that the visibility of a binding in a BindGroupLayout can be None |
| TEST_F(BindGroupLayoutValidationTest, BindGroupLayoutVisibilityNone) { |
| utils::MakeBindGroupLayout(device, |
| { |
| {0, wgpu::ShaderStage::Vertex, wgpu::BufferBindingType::Uniform}, |
| }); |
| |
| wgpu::BindGroupLayoutEntry entry; |
| entry.binding = 0; |
| entry.visibility = wgpu::ShaderStage::None; |
| entry.buffer.type = wgpu::BufferBindingType::Uniform; |
| wgpu::BindGroupLayoutDescriptor descriptor; |
| descriptor.entryCount = 1; |
| descriptor.entries = &entry; |
| device.CreateBindGroupLayout(&descriptor); |
| } |
| |
| // This test verifies that a binding with None visibility in the bind group layout still |
| // requires a corresponding entry in the bind group |
| TEST_F(BindGroupLayoutValidationTest, BindGroupLayoutVisibilityNoneExpectsBindGroupEntry) { |
| wgpu::BindGroupLayout bgl = utils::MakeBindGroupLayout( |
| device, { |
| {0, wgpu::ShaderStage::Vertex, wgpu::BufferBindingType::Uniform}, |
| {1, wgpu::ShaderStage::None, wgpu::BufferBindingType::Uniform}, |
| }); |
| wgpu::BufferDescriptor descriptor; |
| descriptor.size = 4; |
| descriptor.usage = wgpu::BufferUsage::Uniform; |
| wgpu::Buffer buffer = device.CreateBuffer(&descriptor); |
| |
| utils::MakeBindGroup(device, bgl, {{0, buffer}, {1, buffer}}); |
| |
| ASSERT_DEVICE_ERROR(utils::MakeBindGroup(device, bgl, {{0, buffer}})); |
| } |
| |
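| // Helper to create a BindGroupLayoutEntry at binding 0, visible to the compute stage, with the |
| // binding type described by the remaining arguments. |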
| #define BGLEntryType(...) \ |
| utils::BindingLayoutEntryInitializationHelper(0, wgpu::ShaderStage::Compute, __VA_ARGS__) |
| |
| TEST_F(BindGroupLayoutValidationTest, PerStageLimits) { |
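| // For each resource type: the per-stage limit, an entry of the limited type, and an entry of a |
| // different type used to check that other resources don't count towards that limit. |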
| struct TestInfo { |
| uint32_t maxCount; |
| wgpu::BindGroupLayoutEntry entry; |
| wgpu::BindGroupLayoutEntry otherEntry; |
| }; |
| |
| std::array<TestInfo, 7> kTestInfos = { |
| TestInfo{kMaxSampledTexturesPerShaderStage, BGLEntryType(wgpu::TextureSampleType::Float), |
| BGLEntryType(wgpu::BufferBindingType::Uniform)}, |
| TestInfo{kMaxSamplersPerShaderStage, BGLEntryType(wgpu::SamplerBindingType::Filtering), |
| BGLEntryType(wgpu::BufferBindingType::Uniform)}, |
| TestInfo{kMaxSamplersPerShaderStage, BGLEntryType(wgpu::SamplerBindingType::Comparison), |
| BGLEntryType(wgpu::BufferBindingType::Uniform)}, |
| TestInfo{kMaxStorageBuffersPerShaderStage, BGLEntryType(wgpu::BufferBindingType::Storage), |
| BGLEntryType(wgpu::BufferBindingType::Uniform)}, |
| TestInfo{ |
| kMaxStorageTexturesPerShaderStage, |
| BGLEntryType(wgpu::StorageTextureAccess::WriteOnly, wgpu::TextureFormat::RGBA8Unorm), |
| BGLEntryType(wgpu::BufferBindingType::Uniform)}, |
| TestInfo{kMaxUniformBuffersPerShaderStage, BGLEntryType(wgpu::BufferBindingType::Uniform), |
| BGLEntryType(wgpu::TextureSampleType::Float)}, |
| // External textures use multiple bindings (3 sampled textures, 1 sampler, 1 uniform buffer) |
| // that count towards the per stage binding limits. The number of external textures is |
| // currently restricted by the maximum number of sampled textures. |
| TestInfo{kMaxSampledTexturesPerShaderStage / kSampledTexturesPerExternalTexture, |
| BGLEntryType(&utils::kExternalTextureBindingLayout), |
| BGLEntryType(wgpu::BufferBindingType::Uniform)}}; |
| |
| for (TestInfo info : kTestInfos) { |
| wgpu::BindGroupLayout bgl[2]; |
| std::vector<utils::BindingLayoutEntryInitializationHelper> maxBindings; |
| |
| for (uint32_t i = 0; i < info.maxCount; ++i) { |
| wgpu::BindGroupLayoutEntry entry = info.entry; |
| entry.binding = i; |
| maxBindings.push_back(entry); |
| } |
| |
| // Creating with the maxes works. |
| bgl[0] = MakeBindGroupLayout(maxBindings.data(), maxBindings.size()); |
| |
| // Adding an extra binding of a different type works. |
| { |
| std::vector<utils::BindingLayoutEntryInitializationHelper> bindings = maxBindings; |
| wgpu::BindGroupLayoutEntry entry = info.otherEntry; |
| entry.binding = info.maxCount; |
| bindings.push_back(entry); |
| MakeBindGroupLayout(bindings.data(), bindings.size()); |
| } |
| |
| // Adding an extra binding of the maxed type in a different stage works |
| { |
| std::vector<utils::BindingLayoutEntryInitializationHelper> bindings = maxBindings; |
| wgpu::BindGroupLayoutEntry entry = info.entry; |
| entry.binding = info.maxCount; |
| entry.visibility = wgpu::ShaderStage::Fragment; |
| bindings.push_back(entry); |
| MakeBindGroupLayout(bindings.data(), bindings.size()); |
| } |
| |
| // Adding an extra binding of the maxed type and stage exceeds the per stage limit. |
| { |
| std::vector<utils::BindingLayoutEntryInitializationHelper> bindings = maxBindings; |
| wgpu::BindGroupLayoutEntry entry = info.entry; |
| entry.binding = info.maxCount; |
| bindings.push_back(entry); |
| ASSERT_DEVICE_ERROR(MakeBindGroupLayout(bindings.data(), bindings.size())); |
| } |
| |
| // Creating a pipeline layout from the valid BGL works. |
| TestCreatePipelineLayout(bgl, 1, true); |
| |
| // Adding an extra binding of a different type in a different BGL works |
| bgl[1] = utils::MakeBindGroupLayout(device, {info.otherEntry}); |
| TestCreatePipelineLayout(bgl, 2, true); |
| |
| { |
| // Adding an extra binding of the maxed type in a different stage works |
| wgpu::BindGroupLayoutEntry entry = info.entry; |
| entry.visibility = wgpu::ShaderStage::Fragment; |
| bgl[1] = utils::MakeBindGroupLayout(device, {entry}); |
| TestCreatePipelineLayout(bgl, 2, true); |
| } |
| |
| // Adding an extra binding of the maxed type in a different BGL exceeds the per stage limit. |
| bgl[1] = utils::MakeBindGroupLayout(device, {info.entry}); |
| TestCreatePipelineLayout(bgl, 2, false); |
| } |
| } |
| |
| // This is the same test as PerStageLimits but for the deprecated ReadOnly storage textures. |
| // TODO(crbug.com/dawn/1025): Remove once ReadOnly storage texture deprecation period is passed. |
| TEST_F(BindGroupLayoutValidationTest, PerStageLimits_ReadOnlyStorageTexture) { |
| uint32_t maxCount = kMaxStorageTexturesPerShaderStage; |
| wgpu::BindGroupLayoutEntry entry = |
| BGLEntryType(wgpu::StorageTextureAccess::ReadOnly, wgpu::TextureFormat::RGBA8Unorm); |
| wgpu::BindGroupLayoutEntry otherEntry = BGLEntryType(wgpu::BufferBindingType::Uniform); |
| |
| wgpu::BindGroupLayout bgl[2]; |
| std::vector<utils::BindingLayoutEntryInitializationHelper> maxBindings; |
| |
| for (uint32_t i = 0; i < maxCount; ++i) { |
| entry.binding = i; |
| maxBindings.push_back(entry); |
| } |
| |
| // Creating with the maxes works. |
| EXPECT_DEPRECATION_WARNINGS( |
| bgl[0] = MakeBindGroupLayout(maxBindings.data(), maxBindings.size()), maxCount); |
| |
| // Adding an extra binding of a different type works. |
| { |
| std::vector<utils::BindingLayoutEntryInitializationHelper> bindings = maxBindings; |
| wgpu::BindGroupLayoutEntry newEntry = otherEntry; |
| newEntry.binding = maxCount; |
| bindings.push_back(newEntry); |
| EXPECT_DEPRECATION_WARNINGS(MakeBindGroupLayout(bindings.data(), bindings.size()), |
| maxCount); |
| } |
| |
| // Adding an extra binding of the maxed type in a different stage works |
| { |
| std::vector<utils::BindingLayoutEntryInitializationHelper> bindings = maxBindings; |
| wgpu::BindGroupLayoutEntry newEntry = entry; |
| newEntry.binding = maxCount; |
| newEntry.visibility = wgpu::ShaderStage::Fragment; |
| bindings.push_back(newEntry); |
| EXPECT_DEPRECATION_WARNINGS(MakeBindGroupLayout(bindings.data(), bindings.size()), |
| maxCount + 1); |
| } |
| |
| // Adding an extra binding of the maxed type and stage exceeds the per stage limit. |
| { |
| std::vector<utils::BindingLayoutEntryInitializationHelper> bindings = maxBindings; |
| wgpu::BindGroupLayoutEntry newEntry = entry; |
| newEntry.binding = maxCount; |
| bindings.push_back(newEntry); |
| EXPECT_DEPRECATION_WARNINGS( |
| ASSERT_DEVICE_ERROR(MakeBindGroupLayout(bindings.data(), bindings.size())), |
| maxCount + 1); |
| } |
| |
| // Creating a pipeline layout from the valid BGL works. |
| TestCreatePipelineLayout(bgl, 1, true); |
| |
| // Adding an extra binding of a different type in a different BGL works |
| bgl[1] = utils::MakeBindGroupLayout(device, {otherEntry}); |
| TestCreatePipelineLayout(bgl, 2, true); |
| |
| { |
| // Adding an extra binding of the maxed type in a different stage works |
| wgpu::BindGroupLayoutEntry newEntry = entry; |
| newEntry.visibility = wgpu::ShaderStage::Fragment; |
| EXPECT_DEPRECATION_WARNING(bgl[1] = utils::MakeBindGroupLayout(device, {newEntry})); |
| TestCreatePipelineLayout(bgl, 2, true); |
| } |
| |
| // Adding an extra binding of the maxed type in a different BGL exceeds the per stage limit. |
| EXPECT_DEPRECATION_WARNING(bgl[1] = utils::MakeBindGroupLayout(device, {entry})); |
| TestCreatePipelineLayout(bgl, 2, false); |
| } |
| |
| // External textures require multiple binding slots (3 sampled textures, 1 uniform buffer, 1 |
| // sampler), so ensure that these count towards the limit when combined with non-external |
| // texture bindings. |
| TEST_F(BindGroupLayoutValidationTest, PerStageLimitsWithExternalTexture) { |
| struct TestInfo { |
| uint32_t maxCount; |
| uint32_t bindingsPerExternalTexture; |
| wgpu::BindGroupLayoutEntry entry; |
| wgpu::BindGroupLayoutEntry otherEntry; |
| }; |
| |
| std::array<TestInfo, 3> kTestInfos = { |
| TestInfo{kMaxSampledTexturesPerShaderStage, kSampledTexturesPerExternalTexture, |
| BGLEntryType(wgpu::TextureSampleType::Float), |
| BGLEntryType(wgpu::BufferBindingType::Uniform)}, |
| TestInfo{kMaxSamplersPerShaderStage, kSamplersPerExternalTexture, |
| BGLEntryType(wgpu::SamplerBindingType::Filtering), |
| BGLEntryType(wgpu::BufferBindingType::Uniform)}, |
| TestInfo{kMaxUniformBuffersPerShaderStage, kUniformsPerExternalTexture, |
| BGLEntryType(wgpu::BufferBindingType::Uniform), |
| BGLEntryType(wgpu::TextureSampleType::Float)}, |
| }; |
| |
| for (TestInfo info : kTestInfos) { |
| wgpu::BindGroupLayout bgl[2]; |
| std::vector<utils::BindingLayoutEntryInitializationHelper> maxBindings; |
| |
| // Create an external texture binding layout entry |
| wgpu::BindGroupLayoutEntry entry = BGLEntryType(&utils::kExternalTextureBindingLayout); |
| entry.binding = 0; |
| maxBindings.push_back(entry); |
| |
| // Create the other bindings such that we reach the max bindings per stage when including |
| // the external texture. |
| for (uint32_t i = 1; i <= info.maxCount - info.bindingsPerExternalTexture; ++i) { |
| wgpu::BindGroupLayoutEntry entry = info.entry; |
| entry.binding = i; |
| maxBindings.push_back(entry); |
| } |
| |
| // Ensure that creation with the external texture and the other max bindings works. |
| bgl[0] = MakeBindGroupLayout(maxBindings.data(), maxBindings.size()); |
| |
| // Adding an extra binding of a different type works. |
| { |
| std::vector<utils::BindingLayoutEntryInitializationHelper> bindings = maxBindings; |
| wgpu::BindGroupLayoutEntry entry = info.otherEntry; |
| entry.binding = info.maxCount; |
| bindings.push_back(entry); |
| MakeBindGroupLayout(bindings.data(), bindings.size()); |
| } |
| |
| // Adding an extra binding of the maxed type in a different stage works |
| { |
| std::vector<utils::BindingLayoutEntryInitializationHelper> bindings = maxBindings; |
| wgpu::BindGroupLayoutEntry entry = info.entry; |
| entry.binding = info.maxCount; |
| entry.visibility = wgpu::ShaderStage::Fragment; |
| bindings.push_back(entry); |
| MakeBindGroupLayout(bindings.data(), bindings.size()); |
| } |
| |
| // Adding an extra binding of the maxed type and stage exceeds the per stage limit. |
| { |
| std::vector<utils::BindingLayoutEntryInitializationHelper> bindings = maxBindings; |
| wgpu::BindGroupLayoutEntry entry = info.entry; |
| entry.binding = info.maxCount; |
| bindings.push_back(entry); |
| ASSERT_DEVICE_ERROR(MakeBindGroupLayout(bindings.data(), bindings.size())); |
| } |
| |
| // Creating a pipeline layout from the valid BGL works. |
| TestCreatePipelineLayout(bgl, 1, true); |
| |
| // Adding an extra binding of a different type in a different BGL works |
| bgl[1] = utils::MakeBindGroupLayout(device, {info.otherEntry}); |
| TestCreatePipelineLayout(bgl, 2, true); |
| |
| { |
| // Adding an extra binding of the maxed type in a different stage works |
| wgpu::BindGroupLayoutEntry entry = info.entry; |
| entry.visibility = wgpu::ShaderStage::Fragment; |
| bgl[1] = utils::MakeBindGroupLayout(device, {entry}); |
| TestCreatePipelineLayout(bgl, 2, true); |
| } |
| |
| // Adding an extra binding of the maxed type in a different BGL exceeds the per stage limit. |
| bgl[1] = utils::MakeBindGroupLayout(device, {info.entry}); |
| TestCreatePipelineLayout(bgl, 2, false); |
| } |
| } |
| |
| // Check that the number of dynamic buffers cannot exceed the maximum in one bind group layout |
| // or in one pipeline layout. |
| TEST_F(BindGroupLayoutValidationTest, DynamicBufferNumberLimit) { |
| wgpu::BindGroupLayout bgl[2]; |
| std::vector<wgpu::BindGroupLayoutEntry> maxUniformDB; |
| std::vector<wgpu::BindGroupLayoutEntry> maxStorageDB; |
| std::vector<wgpu::BindGroupLayoutEntry> maxReadonlyStorageDB; |
| |
| // In this test, all bindings use the same shader stage. Ensure that this does not exceed the |
| // per-stage limit. |
| static_assert(kMaxDynamicUniformBuffersPerPipelineLayout <= kMaxUniformBuffersPerShaderStage, |
| ""); |
| static_assert(kMaxDynamicStorageBuffersPerPipelineLayout <= kMaxStorageBuffersPerShaderStage, |
| ""); |
| |
| for (uint32_t i = 0; i < kMaxDynamicUniformBuffersPerPipelineLayout; ++i) { |
| maxUniformDB.push_back(utils::BindingLayoutEntryInitializationHelper( |
| i, wgpu::ShaderStage::Compute, wgpu::BufferBindingType::Uniform, true)); |
| } |
| |
| for (uint32_t i = 0; i < kMaxDynamicStorageBuffersPerPipelineLayout; ++i) { |
| maxStorageDB.push_back(utils::BindingLayoutEntryInitializationHelper( |
| i, wgpu::ShaderStage::Compute, wgpu::BufferBindingType::Storage, true)); |
| } |
| |
| for (uint32_t i = 0; i < kMaxDynamicStorageBuffersPerPipelineLayout; ++i) { |
| maxReadonlyStorageDB.push_back(utils::BindingLayoutEntryInitializationHelper( |
| i, wgpu::ShaderStage::Compute, wgpu::BufferBindingType::ReadOnlyStorage, true)); |
| } |
| |
| // Test creating with the maxes works |
| { |
| bgl[0] = MakeBindGroupLayout(maxUniformDB.data(), maxUniformDB.size()); |
| TestCreatePipelineLayout(bgl, 1, true); |
| |
| bgl[0] = MakeBindGroupLayout(maxStorageDB.data(), maxStorageDB.size()); |
| TestCreatePipelineLayout(bgl, 1, true); |
| |
| bgl[0] = MakeBindGroupLayout(maxReadonlyStorageDB.data(), maxReadonlyStorageDB.size()); |
| TestCreatePipelineLayout(bgl, 1, true); |
| } |
| |
| // The following tests exceed the per-pipeline layout limits. We use the Fragment stage to |
| // ensure we don't hit the per-stage limit. |
| |
| // Check dynamic uniform buffers exceed maximum in pipeline layout. |
| { |
| bgl[0] = MakeBindGroupLayout(maxUniformDB.data(), maxUniformDB.size()); |
| bgl[1] = utils::MakeBindGroupLayout( |
| device, { |
| {0, wgpu::ShaderStage::Fragment, wgpu::BufferBindingType::Uniform, true}, |
| }); |
| |
| TestCreatePipelineLayout(bgl, 2, false); |
| } |
| |
| // Check dynamic storage buffers exceed maximum in pipeline layout |
| { |
| bgl[0] = MakeBindGroupLayout(maxStorageDB.data(), maxStorageDB.size()); |
| bgl[1] = utils::MakeBindGroupLayout( |
| device, { |
| {0, wgpu::ShaderStage::Fragment, wgpu::BufferBindingType::Storage, true}, |
| }); |
| |
| TestCreatePipelineLayout(bgl, 2, false); |
| } |
| |
| // Check dynamic readonly storage buffers exceed maximum in pipeline layout |
| { |
| bgl[0] = MakeBindGroupLayout(maxReadonlyStorageDB.data(), maxReadonlyStorageDB.size()); |
| bgl[1] = utils::MakeBindGroupLayout( |
| device, |
| { |
| {0, wgpu::ShaderStage::Fragment, wgpu::BufferBindingType::ReadOnlyStorage, true}, |
| }); |
| |
| TestCreatePipelineLayout(bgl, 2, false); |
| } |
| |
| // Check dynamic storage buffers + dynamic readonly storage buffers exceed maximum storage |
| // buffers in pipeline layout |
| { |
| bgl[0] = MakeBindGroupLayout(maxStorageDB.data(), maxStorageDB.size()); |
| bgl[1] = utils::MakeBindGroupLayout( |
| device, |
| { |
| {0, wgpu::ShaderStage::Fragment, wgpu::BufferBindingType::ReadOnlyStorage, true}, |
| }); |
| |
| TestCreatePipelineLayout(bgl, 2, false); |
| } |
| |
| // Check dynamic uniform buffers exceed maximum in bind group layout. |
| { |
| maxUniformDB.push_back(utils::BindingLayoutEntryInitializationHelper( |
| kMaxDynamicUniformBuffersPerPipelineLayout, wgpu::ShaderStage::Fragment, |
| wgpu::BufferBindingType::Uniform, true)); |
| TestCreateBindGroupLayout(maxUniformDB.data(), maxUniformDB.size(), false); |
| } |
| |
| // Check dynamic storage buffers exceed maximum in bind group layout. |
| { |
| maxStorageDB.push_back(utils::BindingLayoutEntryInitializationHelper( |
| kMaxDynamicStorageBuffersPerPipelineLayout, wgpu::ShaderStage::Fragment, |
| wgpu::BufferBindingType::Storage, true)); |
| TestCreateBindGroupLayout(maxStorageDB.data(), maxStorageDB.size(), false); |
| } |
| |
| // Check dynamic readonly storage buffers exceed maximum in bind group layout. |
| { |
| maxReadonlyStorageDB.push_back(utils::BindingLayoutEntryInitializationHelper( |
| kMaxDynamicStorageBuffersPerPipelineLayout, wgpu::ShaderStage::Fragment, |
| wgpu::BufferBindingType::ReadOnlyStorage, true)); |
| TestCreateBindGroupLayout(maxReadonlyStorageDB.data(), maxReadonlyStorageDB.size(), false); |
| } |
| } |
| |
| // Test that multisampled textures must be 2D sampled textures |
| TEST_F(BindGroupLayoutValidationTest, MultisampledTextureViewDimension) { |
| // Multisampled 2D texture works. |
| utils::MakeBindGroupLayout(device, |
| { |
| {0, wgpu::ShaderStage::Compute, wgpu::TextureSampleType::Float, |
| wgpu::TextureViewDimension::e2D, true}, |
| }); |
| |
| // Multisampled 2D (defaulted) texture works. |
| utils::MakeBindGroupLayout(device, |
| { |
| {0, wgpu::ShaderStage::Compute, wgpu::TextureSampleType::Float, |
| wgpu::TextureViewDimension::Undefined, true}, |
| }); |
| |
| // Multisampled 2D array texture is invalid. |
| ASSERT_DEVICE_ERROR(utils::MakeBindGroupLayout( |
| device, { |
| {0, wgpu::ShaderStage::Compute, wgpu::TextureSampleType::Float, |
| wgpu::TextureViewDimension::e2DArray, true}, |
| })); |
| |
| // Multisampled cube texture is invalid. |
| ASSERT_DEVICE_ERROR(utils::MakeBindGroupLayout( |
| device, { |
| {0, wgpu::ShaderStage::Compute, wgpu::TextureSampleType::Float, |
| wgpu::TextureViewDimension::Cube, true}, |
| })); |
| |
| // Multisampled cube array texture is invalid. |
| ASSERT_DEVICE_ERROR(utils::MakeBindGroupLayout( |
| device, { |
| {0, wgpu::ShaderStage::Compute, wgpu::TextureSampleType::Float, |
| wgpu::TextureViewDimension::CubeArray, true}, |
| })); |
| |
| // Multisampled 3D texture is invalid. |
| ASSERT_DEVICE_ERROR(utils::MakeBindGroupLayout( |
| device, { |
| {0, wgpu::ShaderStage::Compute, wgpu::TextureSampleType::Float, |
| wgpu::TextureViewDimension::e3D, true}, |
| })); |
| |
| // Multisampled 1D texture is invalid. |
| ASSERT_DEVICE_ERROR(utils::MakeBindGroupLayout( |
| device, { |
| {0, wgpu::ShaderStage::Compute, wgpu::TextureSampleType::Float, |
| wgpu::TextureViewDimension::e1D, true}, |
| })); |
| } |
| |
| // Test that multisampled texture bindings are valid |
| TEST_F(BindGroupLayoutValidationTest, MultisampledTextureSampleType) { |
| // Multisampled float sample type works. |
| utils::MakeBindGroupLayout(device, |
| { |
| {0, wgpu::ShaderStage::Compute, wgpu::TextureSampleType::Float, |
| wgpu::TextureViewDimension::e2D, true}, |
| }); |
| |
| // Multisampled uint sample type works. |
| utils::MakeBindGroupLayout(device, |
| { |
| {0, wgpu::ShaderStage::Compute, wgpu::TextureSampleType::Uint, |
| wgpu::TextureViewDimension::e2D, true}, |
| }); |
| |
| // Multisampled sint sample type works. |
| utils::MakeBindGroupLayout(device, |
| { |
| {0, wgpu::ShaderStage::Compute, wgpu::TextureSampleType::Sint, |
| wgpu::TextureViewDimension::e2D, true}, |
| }); |
| |
| // Multisampled depth sample type works. |
| utils::MakeBindGroupLayout(device, |
| { |
| {0, wgpu::ShaderStage::Compute, wgpu::TextureSampleType::Depth, |
| wgpu::TextureViewDimension::e2D, true}, |
| }); |
| } |
| |
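| // Buffer and binding sizes used by the SetBindGroup tests below. |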
| constexpr uint64_t kBufferSize = 3 * kMinUniformBufferOffsetAlignment + 8; |
| constexpr uint32_t kBindingSize = 9; |
| |
| class SetBindGroupValidationTest : public ValidationTest { |
| public: |
| void SetUp() override { |
| ValidationTest::SetUp(); |
| |
| mBindGroupLayout = utils::MakeBindGroupLayout( |
| device, {{0, wgpu::ShaderStage::Compute | wgpu::ShaderStage::Fragment, |
| wgpu::BufferBindingType::Uniform, true}, |
| {1, wgpu::ShaderStage::Compute | wgpu::ShaderStage::Fragment, |
| wgpu::BufferBindingType::Uniform, false}, |
| {2, wgpu::ShaderStage::Compute | wgpu::ShaderStage::Fragment, |
| wgpu::BufferBindingType::Storage, true}, |
| {3, wgpu::ShaderStage::Compute | wgpu::ShaderStage::Fragment, |
| wgpu::BufferBindingType::ReadOnlyStorage, true}}); |
| } |
| |
| wgpu::Buffer CreateBuffer(uint64_t bufferSize, wgpu::BufferUsage usage) { |
| wgpu::BufferDescriptor bufferDescriptor; |
| bufferDescriptor.size = bufferSize; |
| bufferDescriptor.usage = usage; |
| |
| return device.CreateBuffer(&bufferDescriptor); |
| } |
| |
| wgpu::BindGroupLayout mBindGroupLayout; |
| |
| wgpu::RenderPipeline CreateRenderPipeline() { |
| wgpu::ShaderModule vsModule = utils::CreateShaderModule(device, R"( |
| [[stage(vertex)]] fn main() -> [[builtin(position)]] vec4<f32> { |
| return vec4<f32>(); |
| })"); |
| |
| wgpu::ShaderModule fsModule = utils::CreateShaderModule(device, R"( |
| [[block]] struct S { |
| value : vec2<f32>; |
| }; |
| |
| [[group(0), binding(0)]] var<uniform> uBufferDynamic : S; |
| [[group(0), binding(1)]] var<uniform> uBuffer : S; |
| [[group(0), binding(2)]] var<storage, read_write> sBufferDynamic : S; |
| [[group(0), binding(3)]] var<storage, read> sReadonlyBufferDynamic : S; |
| |
| [[stage(fragment)]] fn main() { |
| })"); |
| |
| utils::ComboRenderPipelineDescriptor pipelineDescriptor; |
| pipelineDescriptor.vertex.module = vsModule; |
| pipelineDescriptor.cFragment.module = fsModule; |
| pipelineDescriptor.cTargets[0].writeMask = wgpu::ColorWriteMask::None; |
| wgpu::PipelineLayout pipelineLayout = |
| utils::MakeBasicPipelineLayout(device, &mBindGroupLayout); |
| pipelineDescriptor.layout = pipelineLayout; |
| return device.CreateRenderPipeline(&pipelineDescriptor); |
| } |
| |
| wgpu::ComputePipeline CreateComputePipeline() { |
| wgpu::ShaderModule csModule = utils::CreateShaderModule(device, R"( |
| [[block]] struct S { |
| value : vec2<f32>; |
| }; |
| |
| [[group(0), binding(0)]] var<uniform> uBufferDynamic : S; |
| [[group(0), binding(1)]] var<uniform> uBuffer : S; |
| [[group(0), binding(2)]] var<storage, read_write> sBufferDynamic : S; |
| [[group(0), binding(3)]] var<storage, read> sReadonlyBufferDynamic : S; |
| |
| [[stage(compute), workgroup_size(4, 4, 1)]] fn main() { |
| })"); |
| |
| wgpu::PipelineLayout pipelineLayout = |
| utils::MakeBasicPipelineLayout(device, &mBindGroupLayout); |
| |
| wgpu::ComputePipelineDescriptor csDesc; |
| csDesc.layout = pipelineLayout; |
| csDesc.compute.module = csModule; |
| csDesc.compute.entryPoint = "main"; |
| |
| return device.CreateComputePipeline(&csDesc); |
| } |
| |
| void TestRenderPassBindGroup(wgpu::BindGroup bindGroup, |
| uint32_t* offsets, |
| uint32_t count, |
| bool expectation) { |
| wgpu::RenderPipeline renderPipeline = CreateRenderPipeline(); |
| DummyRenderPass renderPass(device); |
| |
| wgpu::CommandEncoder commandEncoder = device.CreateCommandEncoder(); |
| wgpu::RenderPassEncoder renderPassEncoder = commandEncoder.BeginRenderPass(&renderPass); |
| renderPassEncoder.SetPipeline(renderPipeline); |
| if (bindGroup != nullptr) { |
| renderPassEncoder.SetBindGroup(0, bindGroup, count, offsets); |
| } |
| renderPassEncoder.Draw(3); |
| renderPassEncoder.EndPass(); |
| if (!expectation) { |
| ASSERT_DEVICE_ERROR(commandEncoder.Finish()); |
| } else { |
| commandEncoder.Finish(); |
| } |
| } |
| |
| void TestComputePassBindGroup(wgpu::BindGroup bindGroup, |
| uint32_t* offsets, |
| uint32_t count, |
| bool expectation) { |
| wgpu::ComputePipeline computePipeline = CreateComputePipeline(); |
| |
| wgpu::CommandEncoder commandEncoder = device.CreateCommandEncoder(); |
| wgpu::ComputePassEncoder computePassEncoder = commandEncoder.BeginComputePass(); |
| computePassEncoder.SetPipeline(computePipeline); |
| if (bindGroup != nullptr) { |
| computePassEncoder.SetBindGroup(0, bindGroup, count, offsets); |
| } |
| computePassEncoder.Dispatch(1); |
| computePassEncoder.EndPass(); |
| if (!expectation) { |
| ASSERT_DEVICE_ERROR(commandEncoder.Finish()); |
| } else { |
| commandEncoder.Finish(); |
| } |
| } |
| }; |
| |
| // Control case where the bind group and dynamic offsets are all valid. |
| TEST_F(SetBindGroupValidationTest, Basic) { |
| // Set up the bind group. |
| wgpu::Buffer uniformBuffer = CreateBuffer(kBufferSize, wgpu::BufferUsage::Uniform); |
| wgpu::Buffer storageBuffer = CreateBuffer(kBufferSize, wgpu::BufferUsage::Storage); |
| wgpu::Buffer readonlyStorageBuffer = CreateBuffer(kBufferSize, wgpu::BufferUsage::Storage); |
| wgpu::BindGroup bindGroup = utils::MakeBindGroup(device, mBindGroupLayout, |
| {{0, uniformBuffer, 0, kBindingSize}, |
| {1, uniformBuffer, 0, kBindingSize}, |
| {2, storageBuffer, 0, kBindingSize}, |
| {3, readonlyStorageBuffer, 0, kBindingSize}}); |
| |
| std::array<uint32_t, 3> offsets = {512, 256, 0}; |
| |
| TestRenderPassBindGroup(bindGroup, offsets.data(), 3, true); |
| |
| TestComputePassBindGroup(bindGroup, offsets.data(), 3, true); |
| } |
| |
| // Draw/dispatch with a bind group missing is invalid |
| TEST_F(SetBindGroupValidationTest, MissingBindGroup) { |
| TestRenderPassBindGroup(nullptr, nullptr, 0, false); |
| TestComputePassBindGroup(nullptr, nullptr, 0, false); |
| } |
| |
| // Setting a bind group after a draw / dispatch should re-verify that the layout is compatible |
| TEST_F(SetBindGroupValidationTest, VerifyGroupIfChangedAfterAction) { |
| // Set up the bind group |
| wgpu::Buffer uniformBuffer = CreateBuffer(kBufferSize, wgpu::BufferUsage::Uniform); |
| wgpu::Buffer storageBuffer = CreateBuffer(kBufferSize, wgpu::BufferUsage::Storage); |
| wgpu::Buffer readonlyStorageBuffer = CreateBuffer(kBufferSize, wgpu::BufferUsage::Storage); |
| wgpu::BindGroup bindGroup = utils::MakeBindGroup(device, mBindGroupLayout, |
| {{0, uniformBuffer, 0, kBindingSize}, |
| {1, uniformBuffer, 0, kBindingSize}, |
| {2, storageBuffer, 0, kBindingSize}, |
| {3, readonlyStorageBuffer, 0, kBindingSize}}); |
| |
| std::array<uint32_t, 3> offsets = {512, 256, 0}; |
| |
| // Set up a bind group with an incompatible layout |
| wgpu::BindGroupLayout invalidLayout = utils::MakeBindGroupLayout( |
| device, {{0, wgpu::ShaderStage::Compute | wgpu::ShaderStage::Fragment, |
| wgpu::BufferBindingType::Storage}}); |
| wgpu::BindGroup invalidGroup = |
| utils::MakeBindGroup(device, invalidLayout, {{0, storageBuffer, 0, kBindingSize}}); |
| |
| { |
| wgpu::ComputePipeline computePipeline = CreateComputePipeline(); |
| wgpu::CommandEncoder commandEncoder = device.CreateCommandEncoder(); |
| wgpu::ComputePassEncoder computePassEncoder = commandEncoder.BeginComputePass(); |
| computePassEncoder.SetPipeline(computePipeline); |
| computePassEncoder.SetBindGroup(0, bindGroup, 3, offsets.data()); |
| computePassEncoder.Dispatch(1); |
| computePassEncoder.SetBindGroup(0, invalidGroup, 0, nullptr); |
| computePassEncoder.Dispatch(1); |
| computePassEncoder.EndPass(); |
| ASSERT_DEVICE_ERROR(commandEncoder.Finish()); |
| } |
| { |
| wgpu::RenderPipeline renderPipeline = CreateRenderPipeline(); |
| DummyRenderPass renderPass(device); |
| |
| wgpu::CommandEncoder commandEncoder = device.CreateCommandEncoder(); |
| wgpu::RenderPassEncoder renderPassEncoder = commandEncoder.BeginRenderPass(&renderPass); |
| renderPassEncoder.SetPipeline(renderPipeline); |
| renderPassEncoder.SetBindGroup(0, bindGroup, 3, offsets.data()); |
| renderPassEncoder.Draw(3); |
| renderPassEncoder.SetBindGroup(0, invalidGroup, 0, nullptr); |
| renderPassEncoder.Draw(3); |
| renderPassEncoder.EndPass(); |
| ASSERT_DEVICE_ERROR(commandEncoder.Finish()); |
| } |
| } |
| |
| // Test cases that test dynamic offsets count mismatch with bind group layout. |
| TEST_F(SetBindGroupValidationTest, DynamicOffsetsMismatch) { |
| // Set up bind group. |
| wgpu::Buffer uniformBuffer = CreateBuffer(kBufferSize, wgpu::BufferUsage::Uniform); |
| wgpu::Buffer storageBuffer = CreateBuffer(kBufferSize, wgpu::BufferUsage::Storage); |
| wgpu::Buffer readonlyStorageBuffer = CreateBuffer(kBufferSize, wgpu::BufferUsage::Storage); |
| wgpu::BindGroup bindGroup = utils::MakeBindGroup(device, mBindGroupLayout, |
| {{0, uniformBuffer, 0, kBindingSize}, |
| {1, uniformBuffer, 0, kBindingSize}, |
| {2, storageBuffer, 0, kBindingSize}, |
| {3, readonlyStorageBuffer, 0, kBindingSize}}); |
| |
| // The number of dynamic offsets does not match the bind group layout, which expects 3. |
| std::array<uint32_t, 4> mismatchOffsets = {768, 512, 256, 0}; |
| |
| TestRenderPassBindGroup(bindGroup, mismatchOffsets.data(), 1, false); |
| TestRenderPassBindGroup(bindGroup, mismatchOffsets.data(), 2, false); |
| TestRenderPassBindGroup(bindGroup, mismatchOffsets.data(), 4, false); |
| |
| TestComputePassBindGroup(bindGroup, mismatchOffsets.data(), 1, false); |
| TestComputePassBindGroup(bindGroup, mismatchOffsets.data(), 2, false); |
| TestComputePassBindGroup(bindGroup, mismatchOffsets.data(), 4, false); |
| } |
| |
| // Test that unaligned dynamic offsets are invalid |
| TEST_F(SetBindGroupValidationTest, DynamicOffsetsNotAligned) { |
| // Set up bind group. |
| wgpu::Buffer uniformBuffer = CreateBuffer(kBufferSize, wgpu::BufferUsage::Uniform); |
| wgpu::Buffer storageBuffer = CreateBuffer(kBufferSize, wgpu::BufferUsage::Storage); |
| wgpu::Buffer readonlyStorageBuffer = CreateBuffer(kBufferSize, wgpu::BufferUsage::Storage); |
| wgpu::BindGroup bindGroup = utils::MakeBindGroup(device, mBindGroupLayout, |
| {{0, uniformBuffer, 0, kBindingSize}, |
| {1, uniformBuffer, 0, kBindingSize}, |
| {2, storageBuffer, 0, kBindingSize}, |
| {3, readonlyStorageBuffer, 0, kBindingSize}}); |
| |
| // The offset of 128 is not aligned to the required dynamic offset alignment. |
| std::array<uint32_t, 3> notAlignedOffsets = {512, 128, 0}; |
| |
| TestRenderPassBindGroup(bindGroup, notAlignedOffsets.data(), 3, false); |
| |
| TestComputePassBindGroup(bindGroup, notAlignedOffsets.data(), 3, false); |
| } |
| |
| // Test that an out-of-bounds dynamic offset on a dynamic uniform buffer is invalid. |
| TEST_F(SetBindGroupValidationTest, OffsetOutOfBoundDynamicUniformBuffer) { |
| // Set up bind group. |
| wgpu::Buffer uniformBuffer = CreateBuffer(kBufferSize, wgpu::BufferUsage::Uniform); |
| wgpu::Buffer storageBuffer = CreateBuffer(kBufferSize, wgpu::BufferUsage::Storage); |
| wgpu::Buffer readonlyStorageBuffer = CreateBuffer(kBufferSize, wgpu::BufferUsage::Storage); |
| wgpu::BindGroup bindGroup = utils::MakeBindGroup(device, mBindGroupLayout, |
| {{0, uniformBuffer, 0, kBindingSize}, |
| {1, uniformBuffer, 0, kBindingSize}, |
| {2, storageBuffer, 0, kBindingSize}, |
| {3, readonlyStorageBuffer, 0, kBindingSize}}); |
| |
| // The dynamic offset of 1024 pushes the uniform binding past the end of the buffer. |
| std::array<uint32_t, 3> overFlowOffsets = {1024, 256, 0}; |
| |
| TestRenderPassBindGroup(bindGroup, overFlowOffsets.data(), 3, false); |
| |
| TestComputePassBindGroup(bindGroup, overFlowOffsets.data(), 3, false); |
| } |
| |
| // Test that an out-of-bounds dynamic offset on a dynamic storage buffer is invalid. |
| TEST_F(SetBindGroupValidationTest, OffsetOutOfBoundDynamicStorageBuffer) { |
| // Set up bind group. |
| wgpu::Buffer uniformBuffer = CreateBuffer(kBufferSize, wgpu::BufferUsage::Uniform); |
| wgpu::Buffer storageBuffer = CreateBuffer(kBufferSize, wgpu::BufferUsage::Storage); |
| wgpu::Buffer readonlyStorageBuffer = CreateBuffer(kBufferSize, wgpu::BufferUsage::Storage); |
| wgpu::BindGroup bindGroup = utils::MakeBindGroup(device, mBindGroupLayout, |
| {{0, uniformBuffer, 0, kBindingSize}, |
| {1, uniformBuffer, 0, kBindingSize}, |
| {2, storageBuffer, 0, kBindingSize}, |
| {3, readonlyStorageBuffer, 0, kBindingSize}}); |
| |
| // The dynamic offset of 1024 pushes the readonly storage binding past the end of the buffer. |
| std::array<uint32_t, 3> overFlowOffsets = {0, 256, 1024}; |
| |
| TestRenderPassBindGroup(bindGroup, overFlowOffsets.data(), 3, false); |
| |
| TestComputePassBindGroup(bindGroup, overFlowOffsets.data(), 3, false); |
| } |
| |
| // Test that a dynamic uniform buffer binding is invalid when the dynamic offset plus the |
| // binding size exceeds the buffer size. |
| TEST_F(SetBindGroupValidationTest, BindingSizeOutOfBoundDynamicUniformBuffer) { |
| // Set up the bind group. The dynamic offset applied below leaves fewer than kBindingSize bytes |
| // in the uniform buffer. |
| wgpu::Buffer uniformBuffer = CreateBuffer(kBufferSize, wgpu::BufferUsage::Uniform); |
| wgpu::Buffer storageBuffer = CreateBuffer(kBufferSize, wgpu::BufferUsage::Storage); |
| wgpu::Buffer readonlyStorageBuffer = CreateBuffer(kBufferSize, wgpu::BufferUsage::Storage); |
| wgpu::BindGroup bindGroup = utils::MakeBindGroup(device, mBindGroupLayout, |
| {{0, uniformBuffer, 0, kBindingSize}, |
| {1, uniformBuffer, 0, kBindingSize}, |
| {2, storageBuffer, 0, kBindingSize}, |
| {3, readonlyStorageBuffer, 0, kBindingSize}}); |
| |
| // The dynamic offset alone stays within the buffer, but adding the binding size |
| // pushes the binding out of bounds. |
| std::array<uint32_t, 3> offsets = {768, 256, 0}; |
| |
| TestRenderPassBindGroup(bindGroup, offsets.data(), 3, false); |
| |
| TestComputePassBindGroup(bindGroup, offsets.data(), 3, false); |
| } |
| |
| // Test that a dynamic storage buffer binding is invalid when the dynamic offset plus the |
| // binding size exceeds the buffer size. |
| TEST_F(SetBindGroupValidationTest, BindingSizeOutOfBoundDynamicStorageBuffer) { |
| wgpu::Buffer uniformBuffer = CreateBuffer(kBufferSize, wgpu::BufferUsage::Uniform); |
| wgpu::Buffer storageBuffer = CreateBuffer(kBufferSize, wgpu::BufferUsage::Storage); |
| wgpu::Buffer readonlyStorageBuffer = CreateBuffer(kBufferSize, wgpu::BufferUsage::Storage); |
| wgpu::BindGroup bindGroup = utils::MakeBindGroup(device, mBindGroupLayout, |
| {{0, uniformBuffer, 0, kBindingSize}, |
| {1, uniformBuffer, 0, kBindingSize}, |
| {2, storageBuffer, 0, kBindingSize}, |
| {3, readonlyStorageBuffer, 0, kBindingSize}}); |
| // The dynamic offset alone stays within the buffer, but adding the binding size |
| // pushes the binding out of bounds. |
| std::array<uint32_t, 3> offsets = {0, 256, 768}; |
| |
| TestRenderPassBindGroup(bindGroup, offsets.data(), 3, false); |
| |
| TestComputePassBindGroup(bindGroup, offsets.data(), 3, false); |
| } |
| |
| // Regression test for crbug.com/dawn/408 where dynamic offsets were applied in the wrong order. |
| // Dynamic offsets should be applied in increasing order of binding number. |
| TEST_F(SetBindGroupValidationTest, DynamicOffsetOrder) { |
| // Note: The order of the binding numbers of the bind group and bind group layout are |
| // intentionally different and not in increasing order. |
| // This test uses both storage and uniform buffers to ensure buffer bindings are sorted by |
| // binding number first rather than by type. |
| wgpu::BindGroupLayout bgl = utils::MakeBindGroupLayout( |
| device, { |
| {3, wgpu::ShaderStage::Compute, wgpu::BufferBindingType::ReadOnlyStorage, true}, |
| {0, wgpu::ShaderStage::Compute, wgpu::BufferBindingType::ReadOnlyStorage, true}, |
| {2, wgpu::ShaderStage::Compute, wgpu::BufferBindingType::Uniform, true}, |
| }); |
| |
| // Create buffers which are 3x, 2x, and 1x the size of the minimum buffer offset, plus 4 bytes |
| // to spare (to avoid zero-sized bindings). We will offset the bindings so they reach the very |
| // end of the buffer. Any mismatch applying too-large of an offset to a smaller buffer will hit |
| // the out-of-bounds condition during validation. |
| wgpu::Buffer buffer3x = |
| CreateBuffer(3 * kMinUniformBufferOffsetAlignment + 4, wgpu::BufferUsage::Storage); |
| wgpu::Buffer buffer2x = |
| CreateBuffer(2 * kMinUniformBufferOffsetAlignment + 4, wgpu::BufferUsage::Storage); |
| wgpu::Buffer buffer1x = |
| CreateBuffer(1 * kMinUniformBufferOffsetAlignment + 4, wgpu::BufferUsage::Uniform); |
| wgpu::BindGroup bindGroup = utils::MakeBindGroup(device, bgl, |
| { |
| {0, buffer3x, 0, 4}, |
| {3, buffer2x, 0, 4}, |
| {2, buffer1x, 0, 4}, |
| }); |
| |
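| // Dynamic offsets are supplied in increasing binding-number order: offsets[0] applies to |
| // binding 0 (buffer3x), offsets[1] to binding 2 (buffer1x), and offsets[2] to binding 3 |
| // (buffer2x). |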
| std::array<uint32_t, 3> offsets; |
| { |
| // Base case works. |
| offsets = {/* binding 0 */ 0, |
| /* binding 2 */ 0, |
| /* binding 3 */ 0}; |
| wgpu::CommandEncoder commandEncoder = device.CreateCommandEncoder(); |
| wgpu::ComputePassEncoder computePassEncoder = commandEncoder.BeginComputePass(); |
| computePassEncoder.SetBindGroup(0, bindGroup, offsets.size(), offsets.data()); |
| computePassEncoder.EndPass(); |
| commandEncoder.Finish(); |
| } |
| { |
| // Offset the first binding to touch the end of the buffer. Should succeed. |
| // Will fail if the offset is incorrectly applied to the second or third bindings since their |
| // buffers are too small. |
| offsets = {/* binding 0 */ 3 * kMinUniformBufferOffsetAlignment, |
| /* binding 2 */ 0, |
| /* binding 3 */ 0}; |
| wgpu::CommandEncoder commandEncoder = device.CreateCommandEncoder(); |
| wgpu::ComputePassEncoder computePassEncoder = commandEncoder.BeginComputePass(); |
| computePassEncoder.SetBindGroup(0, bindGroup, offsets.size(), offsets.data()); |
| computePassEncoder.EndPass(); |
| commandEncoder.Finish(); |
| } |
| { |
| // Offset the second binding to touch the end of the buffer. Should succeed. |
| offsets = {/* binding 0 */ 0, |
| /* binding 2 */ 1 * kMinUniformBufferOffsetAlignment, |
| /* binding 3 */ 0}; |
| wgpu::CommandEncoder commandEncoder = device.CreateCommandEncoder(); |
| wgpu::ComputePassEncoder computePassEncoder = commandEncoder.BeginComputePass(); |
| computePassEncoder.SetBindGroup(0, bindGroup, offsets.size(), offsets.data()); |
| computePassEncoder.EndPass(); |
| commandEncoder.Finish(); |
| } |
| { |
| // Offset the third binding to touch the end of the buffer. Should succeed. |
| // Will fail if the offset is applied to the second binding since its buffer |
| // is too small. |
| offsets = {/* binding 0 */ 0, |
| /* binding 2 */ 0, |
| /* binding 3 */ 2 * kMinUniformBufferOffsetAlignment}; |
| wgpu::CommandEncoder commandEncoder = device.CreateCommandEncoder(); |
| wgpu::ComputePassEncoder computePassEncoder = commandEncoder.BeginComputePass(); |
| computePassEncoder.SetBindGroup(0, bindGroup, offsets.size(), offsets.data()); |
| computePassEncoder.EndPass(); |
| commandEncoder.Finish(); |
| } |
| { |
| // Offset each binding to touch the end of their buffer. Should succeed. |
| offsets = {/* binding 0 */ 3 * kMinUniformBufferOffsetAlignment, |
| /* binding 2 */ 1 * kMinUniformBufferOffsetAlignment, |
| /* binding 3 */ 2 * kMinUniformBufferOffsetAlignment}; |
| wgpu::CommandEncoder commandEncoder = device.CreateCommandEncoder(); |
| wgpu::ComputePassEncoder computePassEncoder = commandEncoder.BeginComputePass(); |
| computePassEncoder.SetBindGroup(0, bindGroup, offsets.size(), offsets.data()); |
| computePassEncoder.EndPass(); |
| commandEncoder.Finish(); |
| } |
| } |
| |
| // Test that an error is produced (and no ASSERTs fired) when using an error bindgroup in |
| // SetBindGroup |
| TEST_F(SetBindGroupValidationTest, ErrorBindGroup) { |
| // Bindgroup creation fails because not all bindings are specified. |
| wgpu::BindGroup bindGroup; |
| ASSERT_DEVICE_ERROR(bindGroup = utils::MakeBindGroup(device, mBindGroupLayout, {})); |
| |
| TestRenderPassBindGroup(bindGroup, nullptr, 0, false); |
| |
| TestComputePassBindGroup(bindGroup, nullptr, 0, false); |
| } |
| |
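| // Test fixture for validating that bind groups set on a pass encoder persist across pipeline |
| // changes. |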
| class SetBindGroupPersistenceValidationTest : public ValidationTest { |
| protected: |
| void SetUp() override { |
| ValidationTest::SetUp(); |
| |
| mVsModule = utils::CreateShaderModule(device, R"( |
| [[stage(vertex)]] fn main() -> [[builtin(position)]] vec4<f32> { |
| return vec4<f32>(); |
| })"); |
| } |
| |
| wgpu::Buffer CreateBuffer(uint64_t bufferSize, wgpu::BufferUsage usage) { |
| wgpu::BufferDescriptor bufferDescriptor; |
| bufferDescriptor.size = bufferSize; |
| bufferDescriptor.usage = usage; |
| |
| return device.CreateBuffer(&bufferDescriptor); |
| } |
| |
| // Generates bind group layouts and a pipeline from a 2D list of binding types. |
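| // For example, SetUpLayoutsAndPipeline({{Uniform}, {Storage}}) would build a fragment shader |
| // roughly equivalent to the following (illustrative sketch only): |
| // [[block]] struct S { value : vec2<f32>; }; |
| // [[group(0), binding(0)]] var<uniform> set0_binding0 : S; |
| // [[group(1), binding(0)]] var<storage, read_write> set1_binding0 : S; |
| // [[stage(fragment)]] fn main() {} |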
| std::tuple<std::vector<wgpu::BindGroupLayout>, wgpu::RenderPipeline> SetUpLayoutsAndPipeline( |
| std::vector<std::vector<wgpu::BufferBindingType>> layouts) { |
| std::vector<wgpu::BindGroupLayout> bindGroupLayouts(layouts.size()); |
| |
| // Iterate through the desired bind group layouts. |
| for (uint32_t l = 0; l < layouts.size(); ++l) { |
| const auto& layout = layouts[l]; |
| std::vector<wgpu::BindGroupLayoutEntry> bindings(layout.size()); |
| |
| // Iterate through binding types and populate a list of BindGroupLayoutEntrys. |
| for (uint32_t b = 0; b < layout.size(); ++b) { |
| bindings[b] = utils::BindingLayoutEntryInitializationHelper( |
| b, wgpu::ShaderStage::Fragment, layout[b]); |
| } |
| |
| // Create the bind group layout. |
| wgpu::BindGroupLayoutDescriptor bglDescriptor; |
| bglDescriptor.entryCount = static_cast<uint32_t>(bindings.size()); |
| bglDescriptor.entries = bindings.data(); |
| bindGroupLayouts[l] = device.CreateBindGroupLayout(&bglDescriptor); |
| } |
| |
| // Create a pipeline layout from the list of bind group layouts. |
| wgpu::PipelineLayoutDescriptor pipelineLayoutDescriptor; |
| pipelineLayoutDescriptor.bindGroupLayoutCount = |
| static_cast<uint32_t>(bindGroupLayouts.size()); |
| pipelineLayoutDescriptor.bindGroupLayouts = bindGroupLayouts.data(); |
| |
| wgpu::PipelineLayout pipelineLayout = |
| device.CreatePipelineLayout(&pipelineLayoutDescriptor); |
| |
| std::stringstream ss; |
| ss << "[[block]] struct S { value : vec2<f32>; };"; |
| |
| // Build a shader which has bindings that match the pipeline layout. |
| for (uint32_t l = 0; l < layouts.size(); ++l) { |
| const auto& layout = layouts[l]; |
| |
| for (uint32_t b = 0; b < layout.size(); ++b) { |
| wgpu::BufferBindingType binding = layout[b]; |
| ss << "[[group(" << l << "), binding(" << b << ")]] "; |
| switch (binding) { |
| case wgpu::BufferBindingType::Storage: |
| ss << "var<storage, read_write> set" << l << "_binding" << b << " : S;"; |
| break; |
| case wgpu::BufferBindingType::Uniform: |
| ss << "var<uniform> set" << l << "_binding" << b << " : S;"; |
| break; |
| default: |
| UNREACHABLE(); |
| } |
| } |
| } |
| |
| ss << "[[stage(fragment)]] fn main() {}"; |
| |
| wgpu::ShaderModule fsModule = utils::CreateShaderModule(device, ss.str().c_str()); |
| |
| utils::ComboRenderPipelineDescriptor pipelineDescriptor; |
| pipelineDescriptor.vertex.module = mVsModule; |
| pipelineDescriptor.cFragment.module = fsModule; |
| pipelineDescriptor.cTargets[0].writeMask = wgpu::ColorWriteMask::None; |
| pipelineDescriptor.layout = pipelineLayout; |
| wgpu::RenderPipeline pipeline = device.CreateRenderPipeline(&pipelineDescriptor); |
| |
| return std::make_tuple(bindGroupLayouts, pipeline); |
| } |
| |
| private: |
| wgpu::ShaderModule mVsModule; |
| }; |
| |
| // Test it is valid to set bind groups before setting the pipeline. |
| TEST_F(SetBindGroupPersistenceValidationTest, BindGroupBeforePipeline) { |
| std::vector<wgpu::BindGroupLayout> bindGroupLayouts; |
| wgpu::RenderPipeline pipeline; |
| std::tie(bindGroupLayouts, pipeline) = SetUpLayoutsAndPipeline({{ |
| {{ |
| wgpu::BufferBindingType::Uniform, |
| wgpu::BufferBindingType::Uniform, |
| }}, |
| {{ |
| wgpu::BufferBindingType::Storage, |
| wgpu::BufferBindingType::Uniform, |
| }}, |
| }}); |
| |
| wgpu::Buffer uniformBuffer = CreateBuffer(kBufferSize, wgpu::BufferUsage::Uniform); |
| wgpu::Buffer storageBuffer = CreateBuffer(kBufferSize, wgpu::BufferUsage::Storage); |
| |
| wgpu::BindGroup bindGroup0 = utils::MakeBindGroup( |
| device, bindGroupLayouts[0], |
| {{0, uniformBuffer, 0, kBindingSize}, {1, uniformBuffer, 0, kBindingSize}}); |
| |
| wgpu::BindGroup bindGroup1 = utils::MakeBindGroup( |
| device, bindGroupLayouts[1], |
| {{0, storageBuffer, 0, kBindingSize}, {1, uniformBuffer, 0, kBindingSize}}); |
| |
| DummyRenderPass renderPass(device); |
| wgpu::CommandEncoder commandEncoder = device.CreateCommandEncoder(); |
| wgpu::RenderPassEncoder renderPassEncoder = commandEncoder.BeginRenderPass(&renderPass); |
| |
| renderPassEncoder.SetBindGroup(0, bindGroup0); |
| renderPassEncoder.SetBindGroup(1, bindGroup1); |
| renderPassEncoder.SetPipeline(pipeline); |
| renderPassEncoder.Draw(3); |
| |
| renderPassEncoder.EndPass(); |
| commandEncoder.Finish(); |
| } |
| |
| // Dawn does not have a concept of bind group inheritance though the backing APIs may. |
| // Test that it is valid to draw with bind groups that are not "inherited". They persist |
| // after a pipeline change. |
| TEST_F(SetBindGroupPersistenceValidationTest, NotVulkanInheritance) { |
| std::vector<wgpu::BindGroupLayout> bindGroupLayoutsA; |
| wgpu::RenderPipeline pipelineA; |
| std::tie(bindGroupLayoutsA, pipelineA) = SetUpLayoutsAndPipeline({{ |
| {{ |
| wgpu::BufferBindingType::Uniform, |
| wgpu::BufferBindingType::Storage, |
| }}, |
| {{ |
| wgpu::BufferBindingType::Uniform, |
| wgpu::BufferBindingType::Uniform, |
| }}, |
| }}); |
| |
| std::vector<wgpu::BindGroupLayout> bindGroupLayoutsB; |
| wgpu::RenderPipeline pipelineB; |
| std::tie(bindGroupLayoutsB, pipelineB) = SetUpLayoutsAndPipeline({{ |
| {{ |
| wgpu::BufferBindingType::Storage, |
| wgpu::BufferBindingType::Uniform, |
| }}, |
| {{ |
| wgpu::BufferBindingType::Uniform, |
| wgpu::BufferBindingType::Uniform, |
| }}, |
| }}); |
| |
| wgpu::Buffer uniformBuffer = CreateBuffer(kBufferSize, wgpu::BufferUsage::Uniform); |
| wgpu::Buffer storageBuffer = CreateBuffer(kBufferSize, wgpu::BufferUsage::Storage); |
| |
| wgpu::BindGroup bindGroupA0 = utils::MakeBindGroup( |
| device, bindGroupLayoutsA[0], |
| {{0, uniformBuffer, 0, kBindingSize}, {1, storageBuffer, 0, kBindingSize}}); |
| |
| wgpu::BindGroup bindGroupA1 = utils::MakeBindGroup( |
| device, bindGroupLayoutsA[1], |
| {{0, uniformBuffer, 0, kBindingSize}, {1, uniformBuffer, 0, kBindingSize}}); |
| |
| wgpu::BindGroup bindGroupB0 = utils::MakeBindGroup( |
| device, bindGroupLayoutsB[0], |
| {{0, storageBuffer, 0, kBindingSize}, {1, uniformBuffer, 0, kBindingSize}}); |
| |
| DummyRenderPass renderPass(device); |
| wgpu::CommandEncoder commandEncoder = device.CreateCommandEncoder(); |
| wgpu::RenderPassEncoder renderPassEncoder = commandEncoder.BeginRenderPass(&renderPass); |
| |
| renderPassEncoder.SetPipeline(pipelineA); |
| renderPassEncoder.SetBindGroup(0, bindGroupA0); |
| renderPassEncoder.SetBindGroup(1, bindGroupA1); |
| renderPassEncoder.Draw(3); |
| |
| renderPassEncoder.SetPipeline(pipelineB); |
| renderPassEncoder.SetBindGroup(0, bindGroupB0); |
| // This draw is valid. |
| // Bind group 1 persists even though it is not "inherited". |
| renderPassEncoder.Draw(3); |
| |
| renderPassEncoder.EndPass(); |
| commandEncoder.Finish(); |
| } |
| |
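| // Test fixture for validating compatibility between the bind group layouts in a pipeline layout |
| // and the resource declarations in the shader. |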
| class BindGroupLayoutCompatibilityTest : public ValidationTest { |
| public: |
| wgpu::Buffer CreateBuffer(uint64_t bufferSize, wgpu::BufferUsage usage) { |
| wgpu::BufferDescriptor bufferDescriptor; |
| bufferDescriptor.size = bufferSize; |
| bufferDescriptor.usage = usage; |
| |
| return device.CreateBuffer(&bufferDescriptor); |
| } |
| |
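| // Creates a render pipeline whose fragment stage is compiled from fsShader and whose pipeline |
| // layout is built from the given bind group layouts. |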
| wgpu::RenderPipeline CreateFSRenderPipeline( |
| const char* fsShader, |
| std::vector<wgpu::BindGroupLayout> bindGroupLayout) { |
| wgpu::ShaderModule vsModule = utils::CreateShaderModule(device, R"( |
| [[stage(vertex)]] fn main() -> [[builtin(position)]] vec4<f32> { |
| return vec4<f32>(); |
| })"); |
| |
| wgpu::ShaderModule fsModule = utils::CreateShaderModule(device, fsShader); |
| |
| wgpu::PipelineLayoutDescriptor descriptor; |
| descriptor.bindGroupLayoutCount = bindGroupLayout.size(); |
| descriptor.bindGroupLayouts = bindGroupLayout.data(); |
| utils::ComboRenderPipelineDescriptor pipelineDescriptor; |
| pipelineDescriptor.vertex.module = vsModule; |
| pipelineDescriptor.cFragment.module = fsModule; |
| pipelineDescriptor.cTargets[0].writeMask = wgpu::ColorWriteMask::None; |
| wgpu::PipelineLayout pipelineLayout = device.CreatePipelineLayout(&descriptor); |
| pipelineDescriptor.layout = pipelineLayout; |
| return device.CreateRenderPipeline(&pipelineDescriptor); |
| } |
| |
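| // Same as CreateFSRenderPipeline, but with a default fragment shader that uses a read-write |
| // storage buffer at group 0 and a readonly storage buffer at group 1. |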
| wgpu::RenderPipeline CreateRenderPipeline(std::vector<wgpu::BindGroupLayout> bindGroupLayouts) { |
| return CreateFSRenderPipeline(R"( |
| [[block]] struct S { |
| value : vec2<f32>; |
| }; |
| |
| [[group(0), binding(0)]] var<storage, read_write> sBufferDynamic : S; |
| [[group(1), binding(0)]] var<storage, read> sReadonlyBufferDynamic : S; |
| |
| [[stage(fragment)]] fn main() { |
| var val : vec2<f32> = sBufferDynamic.value; |
| val = sReadonlyBufferDynamic.value; |
| })", |
| std::move(bindGroupLayouts)); |
| } |
| |
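| // Creates a compute pipeline from the given shader and a pipeline layout built from the given |
| // bind group layouts. |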
| wgpu::ComputePipeline CreateComputePipeline( |
| const char* shader, |
| std::vector<wgpu::BindGroupLayout> bindGroupLayout) { |
| wgpu::ShaderModule csModule = utils::CreateShaderModule(device, shader); |
| |
| wgpu::PipelineLayoutDescriptor descriptor; |
| descriptor.bindGroupLayoutCount = bindGroupLayout.size(); |
| descriptor.bindGroupLayouts = bindGroupLayout.data(); |
| wgpu::PipelineLayout pipelineLayout = device.CreatePipelineLayout(&descriptor); |
| |
| wgpu::ComputePipelineDescriptor csDesc; |
| csDesc.layout = pipelineLayout; |
| csDesc.compute.module = csModule; |
| csDesc.compute.entryPoint = "main"; |
| |
| return device.CreateComputePipeline(&csDesc); |
| } |
| |
| wgpu::ComputePipeline CreateComputePipeline( |
| std::vector<wgpu::BindGroupLayout> bindGroupLayouts) { |
| return CreateComputePipeline(R"( |
| [[block]] struct S { |
| value : vec2<f32>; |
| }; |
| |
| [[group(0), binding(0)]] var<storage, read_write> sBufferDynamic : S; |
| [[group(1), binding(0)]] var<storage, read> sReadonlyBufferDynamic : S; |
| |
| [[stage(compute), workgroup_size(4, 4, 1)]] fn main() { |
| var val : vec2<f32> = sBufferDynamic.value; |
| val = sReadonlyBufferDynamic.value; |
| })", |
| std::move(bindGroupLayouts)); |
| } |
| }; |
| |
| // Test that it is valid to pass a writable storage buffer in the pipeline layout when the shader |
| // uses the binding as a readonly storage buffer. |
| TEST_F(BindGroupLayoutCompatibilityTest, RWStorageInBGLWithROStorageInShader) { |
| // Set up the bind group layout. |
| wgpu::BindGroupLayout bgl0 = utils::MakeBindGroupLayout( |
| device, {{0, wgpu::ShaderStage::Compute | wgpu::ShaderStage::Fragment, |
| wgpu::BufferBindingType::Storage}}); |
| wgpu::BindGroupLayout bgl1 = utils::MakeBindGroupLayout( |
| device, {{0, wgpu::ShaderStage::Compute | wgpu::ShaderStage::Fragment, |
| wgpu::BufferBindingType::Storage}}); |
| |
| CreateRenderPipeline({bgl0, bgl1}); |
| |
| CreateComputePipeline({bgl0, bgl1}); |
| } |
| |
| // Test that it is invalid to pass a readonly storage buffer in the pipeline layout when the shader |
| // uses the binding as a writable storage buffer. |
| TEST_F(BindGroupLayoutCompatibilityTest, ROStorageInBGLWithRWStorageInShader) { |
| // Set up the bind group layout. |
| wgpu::BindGroupLayout bgl0 = utils::MakeBindGroupLayout( |
| device, {{0, wgpu::ShaderStage::Compute | wgpu::ShaderStage::Fragment, |
| wgpu::BufferBindingType::ReadOnlyStorage}}); |
| wgpu::BindGroupLayout bgl1 = utils::MakeBindGroupLayout( |
| device, {{0, wgpu::ShaderStage::Compute | wgpu::ShaderStage::Fragment, |
| wgpu::BufferBindingType::ReadOnlyStorage}}); |
| |
| ASSERT_DEVICE_ERROR(CreateRenderPipeline({bgl0, bgl1})); |
| |
| ASSERT_DEVICE_ERROR(CreateComputePipeline({bgl0, bgl1})); |
| } |
| |
| TEST_F(BindGroupLayoutCompatibilityTest, TextureViewDimension) { |
| constexpr char kTexture2DShaderFS[] = R"( |
| [[group(0), binding(0)]] var myTexture : texture_2d<f32>; |
| [[stage(fragment)]] fn main() { |
| ignore(textureDimensions(myTexture)); |
| })"; |
| constexpr char kTexture2DShaderCS[] = R"( |
| [[group(0), binding(0)]] var myTexture : texture_2d<f32>; |
| [[stage(compute), workgroup_size(1)]] fn main() { |
| ignore(textureDimensions(myTexture)); |
| })"; |
| |
| // Render: Test that 2D texture with 2D view dimension works |
| CreateFSRenderPipeline( |
| kTexture2DShaderFS, |
| {utils::MakeBindGroupLayout( |
| device, {{0, wgpu::ShaderStage::Fragment, wgpu::TextureSampleType::Float, |
| wgpu::TextureViewDimension::e2D}})}); |
| |
| // Render: Test that 2D texture with 2D array view dimension is invalid |
| ASSERT_DEVICE_ERROR(CreateFSRenderPipeline( |
| kTexture2DShaderFS, |
| {utils::MakeBindGroupLayout( |
| device, {{0, wgpu::ShaderStage::Fragment, wgpu::TextureSampleType::Float, |
| wgpu::TextureViewDimension::e2DArray}})})); |
| |
| // Compute: Test that 2D texture with 2D view dimension works |
| CreateComputePipeline( |
| kTexture2DShaderCS, |
| {utils::MakeBindGroupLayout(device, |
| {{0, wgpu::ShaderStage::Compute, wgpu::TextureSampleType::Float, |
| wgpu::TextureViewDimension::e2D}})}); |
| |
| // Compute: Test that 2D texture with 2D array view dimension is invalid |
| ASSERT_DEVICE_ERROR(CreateComputePipeline( |
| kTexture2DShaderCS, |
| {utils::MakeBindGroupLayout(device, |
| {{0, wgpu::ShaderStage::Compute, wgpu::TextureSampleType::Float, |
| wgpu::TextureViewDimension::e2DArray}})})); |
| |
| constexpr char kTexture2DArrayShaderFS[] = R"( |
| [[group(0), binding(0)]] var myTexture : texture_2d_array<f32>; |
| [[stage(fragment)]] fn main() { |
| ignore(textureDimensions(myTexture)); |
| })"; |
| constexpr char kTexture2DArrayShaderCS[] = R"( |
| [[group(0), binding(0)]] var myTexture : texture_2d_array<f32>; |
| [[stage(compute), workgroup_size(1)]] fn main() { |
| ignore(textureDimensions(myTexture)); |
| })"; |
| |
| // Render: Test that 2D texture array with 2D array view dimension works |
| CreateFSRenderPipeline( |
| kTexture2DArrayShaderFS, |
| {utils::MakeBindGroupLayout( |
| device, {{0, wgpu::ShaderStage::Fragment, wgpu::TextureSampleType::Float, |
| wgpu::TextureViewDimension::e2DArray}})}); |
| |
| // Render: Test that 2D texture array with 2D view dimension is invalid |
| ASSERT_DEVICE_ERROR(CreateFSRenderPipeline( |
| kTexture2DArrayShaderFS, |
| {utils::MakeBindGroupLayout( |
| device, {{0, wgpu::ShaderStage::Fragment, wgpu::TextureSampleType::Float, |
| wgpu::TextureViewDimension::e2D}})})); |
| |
| // Compute: Test that 2D texture array with 2D array view dimension works |
| CreateComputePipeline( |
| kTexture2DArrayShaderCS, |
| {utils::MakeBindGroupLayout(device, |
| {{0, wgpu::ShaderStage::Compute, wgpu::TextureSampleType::Float, |
| wgpu::TextureViewDimension::e2DArray}})}); |
| |
| // Compute: Test that 2D texture array with 2D view dimension is invalid |
| ASSERT_DEVICE_ERROR(CreateComputePipeline( |
| kTexture2DArrayShaderCS, |
| {utils::MakeBindGroupLayout(device, |
| {{0, wgpu::ShaderStage::Compute, wgpu::TextureSampleType::Float, |
| wgpu::TextureViewDimension::e2D}})})); |
| } |
| |
| // Test that a bgl with an external texture is compatible with texture_external in a shader and that |
| // an error is returned when the binding in the shader does not match. |
| TEST_F(BindGroupLayoutCompatibilityTest, ExternalTextureBindGroupLayoutCompatibility) { |
| wgpu::BindGroupLayout bgl = utils::MakeBindGroupLayout( |
| device, {{0, wgpu::ShaderStage::Fragment, &utils::kExternalTextureBindingLayout}}); |
| |
| // Test that an external texture binding works with a texture_external in the shader. |
| CreateFSRenderPipeline(R"( |
| [[group(0), binding(0)]] var myExternalTexture: texture_external; |
| [[stage(fragment)]] fn main() { |
| ignore(myExternalTexture); |
| })", |
| {bgl}); |
| |
| // Test that an external texture binding doesn't work with a texture_2d<f32> in the shader. |
| ASSERT_DEVICE_ERROR(CreateFSRenderPipeline(R"( |
| [[group(0), binding(0)]] var myTexture: texture_2d<f32>; |
| [[stage(fragment)]] fn main() { |
| ignore(myTexture); |
| })", |
| {bgl})); |
| } |
| |
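| // Test fixture for validating that the bind groups set on a pass match both the pipeline layout |
| // and the pipeline itself. |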
| class BindingsValidationTest : public BindGroupLayoutCompatibilityTest { |
| public: |
| void TestRenderPassBindings(const wgpu::BindGroup* bg, |
| uint32_t count, |
| wgpu::RenderPipeline pipeline, |
| bool expectation) { |
| wgpu::CommandEncoder encoder = device.CreateCommandEncoder(); |
| DummyRenderPass dummyRenderPass(device); |
| wgpu::RenderPassEncoder rp = encoder.BeginRenderPass(&dummyRenderPass); |
| for (uint32_t i = 0; i < count; ++i) { |
| rp.SetBindGroup(i, bg[i]); |
| } |
| rp.SetPipeline(pipeline); |
| rp.Draw(3); |
| rp.EndPass(); |
| if (!expectation) { |
| ASSERT_DEVICE_ERROR(encoder.Finish()); |
| } else { |
| encoder.Finish(); |
| } |
| } |
| |
| void TestComputePassBindings(const wgpu::BindGroup* bg, |
| uint32_t count, |
| wgpu::ComputePipeline pipeline, |
| bool expectation) { |
| wgpu::CommandEncoder encoder = device.CreateCommandEncoder(); |
| wgpu::ComputePassEncoder cp = encoder.BeginComputePass(); |
| for (uint32_t i = 0; i < count; ++i) { |
| cp.SetBindGroup(i, bg[i]); |
| } |
| cp.SetPipeline(pipeline); |
| cp.Dispatch(1); |
| cp.EndPass(); |
| if (!expectation) { |
| ASSERT_DEVICE_ERROR(encoder.Finish()); |
| } else { |
| encoder.Finish(); |
| } |
| } |
| |
| static constexpr uint32_t kBindingNum = 3; |
| }; |
| |
| // Test that it is valid to set a pipeline layout with bindings unused by the pipeline. |
| TEST_F(BindingsValidationTest, PipelineLayoutWithMoreBindingsThanPipeline) { |
| // Set up bind group layouts. |
| wgpu::BindGroupLayout bgl0 = utils::MakeBindGroupLayout( |
| device, {{0, wgpu::ShaderStage::Compute | wgpu::ShaderStage::Fragment, |
| wgpu::BufferBindingType::Storage}, |
| {1, wgpu::ShaderStage::Compute | wgpu::ShaderStage::Fragment, |
| wgpu::BufferBindingType::Uniform}}); |
| wgpu::BindGroupLayout bgl1 = utils::MakeBindGroupLayout( |
| device, {{0, wgpu::ShaderStage::Compute | wgpu::ShaderStage::Fragment, |
| wgpu::BufferBindingType::ReadOnlyStorage}}); |
| wgpu::BindGroupLayout bgl2 = utils::MakeBindGroupLayout( |
| device, {{0, wgpu::ShaderStage::Compute | wgpu::ShaderStage::Fragment, |
| wgpu::BufferBindingType::Storage}}); |
| |
| // The pipeline layout has an unused binding set (bgl2) and an unused entry in a binding set |
| // (bgl0). |
| CreateRenderPipeline({bgl0, bgl1, bgl2}); |
| |
| CreateComputePipeline({bgl0, bgl1, bgl2}); |
| } |
| |
| // Test that it is invalid to set a pipeline layout that doesn't have all necessary bindings |
| // required by the pipeline. |
| TEST_F(BindingsValidationTest, PipelineLayoutWithLessBindingsThanPipeline) { |
| // Set up bind group layout. |
| wgpu::BindGroupLayout bgl0 = utils::MakeBindGroupLayout( |
| device, {{0, wgpu::ShaderStage::Compute | wgpu::ShaderStage::Fragment, |
| wgpu::BufferBindingType::Storage}}); |
| |
| // A binding set (bgl1) is missing from the pipeline layout. |
| { |
| ASSERT_DEVICE_ERROR(CreateRenderPipeline({bgl0})); |
| |
| ASSERT_DEVICE_ERROR(CreateComputePipeline({bgl0})); |
| } |
| |
| // bgl1 is not missing, but it is empty |
| { |
| wgpu::BindGroupLayout bgl1 = utils::MakeBindGroupLayout(device, {}); |
| |
| ASSERT_DEVICE_ERROR(CreateRenderPipeline({bgl0, bgl1})); |
| |
| ASSERT_DEVICE_ERROR(CreateComputePipeline({bgl0, bgl1})); |
| } |
| |
| // bgl1 is neither missing nor empty, but it doesn't contain the necessary binding |
| { |
| wgpu::BindGroupLayout bgl1 = utils::MakeBindGroupLayout( |
| device, {{1, wgpu::ShaderStage::Compute | wgpu::ShaderStage::Fragment, |
| wgpu::BufferBindingType::Uniform}}); |
| |
| ASSERT_DEVICE_ERROR(CreateRenderPipeline({bgl0, bgl1})); |
| |
| ASSERT_DEVICE_ERROR(CreateComputePipeline({bgl0, bgl1})); |
| } |
| } |
| |
| // Test that it is valid to set bind groups whose layout is not part of the pipeline layout. |
| // However, if a layout is part of the pipeline layout, the corresponding bind group must not |
| // contain extra entries. |
| TEST_F(BindingsValidationTest, BindGroupsWithMoreBindingsThanPipelineLayout) { |
| // Set up bind group layouts, buffers, bind groups, pipeline layouts and pipelines. |
| std::array<wgpu::BindGroupLayout, kBindingNum + 1> bgl; |
| std::array<wgpu::BindGroup, kBindingNum + 1> bg; |
| std::array<wgpu::Buffer, kBindingNum + 1> buffer; |
| for (uint32_t i = 0; i < kBindingNum + 1; ++i) { |
| bgl[i] = utils::MakeBindGroupLayout( |
| device, {{0, wgpu::ShaderStage::Compute | wgpu::ShaderStage::Fragment, |
| wgpu::BufferBindingType::Storage}}); |
| buffer[i] = CreateBuffer(kBufferSize, wgpu::BufferUsage::Storage); |
| bg[i] = utils::MakeBindGroup(device, bgl[i], {{0, buffer[i]}}); |
| } |
| |
| // Set 3 bindings (and 3 pipeline layouts) in pipeline. |
| wgpu::RenderPipeline renderPipeline = CreateRenderPipeline({bgl[0], bgl[1], bgl[2]}); |
| wgpu::ComputePipeline computePipeline = CreateComputePipeline({bgl[0], bgl[1], bgl[2]}); |
| |
| // Compared to the pipeline layout, there is an extra bind group (bg[3]). |
| TestRenderPassBindings(bg.data(), kBindingNum + 1, renderPipeline, true); |
| |
| TestComputePassBindings(bg.data(), kBindingNum + 1, computePipeline, true); |
| |
| // If a bind group has an entry (binding 1 in bgl[1] below) that is unused by the pipeline |
| // layout, it is invalid. A bind group's layout must exactly match the bind group layout |
| // declared in the pipeline layout, if that layout is part of the pipeline layout. |
| bgl[1] = utils::MakeBindGroupLayout( |
| device, {{0, wgpu::ShaderStage::Compute | wgpu::ShaderStage::Fragment, |
| wgpu::BufferBindingType::ReadOnlyStorage}, |
| {1, wgpu::ShaderStage::Compute | wgpu::ShaderStage::Fragment, |
| wgpu::BufferBindingType::Uniform}}); |
| buffer[1] = CreateBuffer(kBufferSize, wgpu::BufferUsage::Storage | wgpu::BufferUsage::Uniform); |
| bg[1] = utils::MakeBindGroup(device, bgl[1], {{0, buffer[1]}, {1, buffer[1]}}); |
| |
| TestRenderPassBindings(bg.data(), kBindingNum, renderPipeline, false); |
| |
| TestComputePassBindings(bg.data(), kBindingNum, computePipeline, false); |
| } |
| |
| // Test that it is invalid to set bind groups that do not provide all of the bindings required |
| // by the pipeline layout. Note that in the following test both the pipeline layout and the |
| // bind groups contain enough bindings for the pipeline itself. |
| TEST_F(BindingsValidationTest, BindGroupsWithLessBindingsThanPipelineLayout) { |
| // Set up bind group layouts, buffers, bind groups, pipeline layouts and pipelines. |
| std::array<wgpu::BindGroupLayout, kBindingNum> bgl; |
| std::array<wgpu::BindGroup, kBindingNum> bg; |
| std::array<wgpu::Buffer, kBindingNum> buffer; |
| for (uint32_t i = 0; i < kBindingNum; ++i) { |
| bgl[i] = utils::MakeBindGroupLayout( |
| device, {{0, wgpu::ShaderStage::Compute | wgpu::ShaderStage::Fragment, |
| wgpu::BufferBindingType::Storage}}); |
| buffer[i] = CreateBuffer(kBufferSize, wgpu::BufferUsage::Storage); |
| bg[i] = utils::MakeBindGroup(device, bgl[i], {{0, buffer[i]}}); |
| } |
| |
| wgpu::RenderPipeline renderPipeline = CreateRenderPipeline({bgl[0], bgl[1], bgl[2]}); |
| wgpu::ComputePipeline computePipeline = CreateComputePipeline({bgl[0], bgl[1], bgl[2]}); |
| |
| // Compared to the pipeline layout, the bind group for the third binding set (bgl[2]) is missing. |
| TestRenderPassBindings(bg.data(), kBindingNum - 1, renderPipeline, false); |
| |
| TestComputePassBindings(bg.data(), kBindingNum - 1, computePipeline, false); |
| |
| // The bind group for bgl[2] is not missing, but both the layout and the bind group are empty. |
| bgl[2] = utils::MakeBindGroupLayout(device, {}); |
| bg[2] = utils::MakeBindGroup(device, bgl[2], {}); |
| |
| TestRenderPassBindings(bg.data(), kBindingNum, renderPipeline, false); |
| |
| TestComputePassBindings(bg.data(), kBindingNum, computePipeline, false); |
| |
| // The bind group for bgl[2] is neither missing nor empty, but it does not contain the binding |
| // required by the pipeline. |
| bgl[2] = utils::MakeBindGroupLayout( |
| device, {{1, wgpu::ShaderStage::Compute | wgpu::ShaderStage::Fragment, |
| wgpu::BufferBindingType::Uniform}}); |
| buffer[2] = CreateBuffer(kBufferSize, wgpu::BufferUsage::Uniform); |
| bg[2] = utils::MakeBindGroup(device, bgl[2], {{1, buffer[2]}}); |
| |
| TestRenderPassBindings(bg.data(), kBindingNum, renderPipeline, false); |
| |
| TestComputePassBindings(bg.data(), kBindingNum, computePipeline, false); |
| } |
| |
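| // Test fixture for validating sampler binding types against shader sampler declarations and |
| // sampler descriptors. |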
| class SamplerTypeBindingTest : public ValidationTest { |
| protected: |
| wgpu::RenderPipeline CreateFragmentPipeline(wgpu::BindGroupLayout* bindGroupLayout, |
| const char* fragmentSource) { |
| wgpu::ShaderModule vsModule = utils::CreateShaderModule(device, R"( |
| [[stage(vertex)]] fn main() -> [[builtin(position)]] vec4<f32> { |
| return vec4<f32>(); |
| })"); |
| |
| wgpu::ShaderModule fsModule = utils::CreateShaderModule(device, fragmentSource); |
| |
| utils::ComboRenderPipelineDescriptor pipelineDescriptor; |
| pipelineDescriptor.vertex.module = vsModule; |
| pipelineDescriptor.cFragment.module = fsModule; |
| pipelineDescriptor.cTargets[0].writeMask = wgpu::ColorWriteMask::None; |
| wgpu::PipelineLayout pipelineLayout = |
| utils::MakeBasicPipelineLayout(device, bindGroupLayout); |
| pipelineDescriptor.layout = pipelineLayout; |
| return device.CreateRenderPipeline(&pipelineDescriptor); |
| } |
| }; |
| |
| // Test that the use of sampler and sampler_comparison in the shader must match the bind group |
| // layout. |
| TEST_F(SamplerTypeBindingTest, ShaderAndBGLMatches) { |
| // Test that a filtering sampler binding works with normal sampler in the shader. |
| { |
| wgpu::BindGroupLayout bindGroupLayout = utils::MakeBindGroupLayout( |
| device, {{0, wgpu::ShaderStage::Fragment, wgpu::SamplerBindingType::Filtering}}); |
| |
| CreateFragmentPipeline(&bindGroupLayout, R"( |
| [[group(0), binding(0)]] var mySampler: sampler; |
| [[stage(fragment)]] fn main() { |
| ignore(mySampler); |
| })"); |
| } |
| |
| // Test that a non-filtering sampler binding works with normal sampler in the shader. |
| { |
| wgpu::BindGroupLayout bindGroupLayout = utils::MakeBindGroupLayout( |
| device, {{0, wgpu::ShaderStage::Fragment, wgpu::SamplerBindingType::NonFiltering}}); |
| |
| CreateFragmentPipeline(&bindGroupLayout, R"( |
| [[group(0), binding(0)]] var mySampler: sampler; |
| [[stage(fragment)]] fn main() { |
| ignore(mySampler); |
| })"); |
| } |
| |
| // Test that comparison sampler binding works with comparison sampler in the shader. |
| { |
| wgpu::BindGroupLayout bindGroupLayout = utils::MakeBindGroupLayout( |
| device, {{0, wgpu::ShaderStage::Fragment, wgpu::SamplerBindingType::Comparison}}); |
| |
| CreateFragmentPipeline(&bindGroupLayout, R"( |
| [[group(0), binding(0)]] var mySampler: sampler_comparison; |
| [[stage(fragment)]] fn main() { |
| ignore(mySampler); |
| })"); |
| } |
| |
| // Test that filtering sampler binding does not work with comparison sampler in the shader. |
| if (HasToggleEnabled("use_tint_generator")) { |
| wgpu::BindGroupLayout bindGroupLayout = utils::MakeBindGroupLayout( |
| device, {{0, wgpu::ShaderStage::Fragment, wgpu::SamplerBindingType::Filtering}}); |
| |
| ASSERT_DEVICE_ERROR(CreateFragmentPipeline(&bindGroupLayout, R"( |
| [[group(0), binding(0)]] var mySampler: sampler_comparison; |
| [[stage(fragment)]] fn main() { |
| ignore(mySampler); |
| })")); |
| } |
| |
| // Test that non-filtering sampler binding does not work with comparison sampler in the shader. |
| if (HasToggleEnabled("use_tint_generator")) { |
| wgpu::BindGroupLayout bindGroupLayout = utils::MakeBindGroupLayout( |
| device, {{0, wgpu::ShaderStage::Fragment, wgpu::SamplerBindingType::NonFiltering}}); |
| |
| ASSERT_DEVICE_ERROR(CreateFragmentPipeline(&bindGroupLayout, R"( |
| [[group(0), binding(0)]] var mySampler: sampler_comparison; |
| [[stage(fragment)]] fn main() { |
| ignore(mySampler); |
| })")); |
| } |
| |
| // Test that comparison sampler binding does not work with normal sampler in the shader. |
| if (HasToggleEnabled("use_tint_generator")) { |
| wgpu::BindGroupLayout bindGroupLayout = utils::MakeBindGroupLayout( |
| device, {{0, wgpu::ShaderStage::Fragment, wgpu::SamplerBindingType::Comparison}}); |
| |
| ASSERT_DEVICE_ERROR(CreateFragmentPipeline(&bindGroupLayout, R"( |
| [[group(0), binding(0)]] var mySampler: sampler; |
| [[stage(fragment)]] fn main() { |
| ignore(mySampler); |
| })")); |
| } |
| |
| // Test that a filtering sampler can be used to sample a float texture. |
| { |
| wgpu::BindGroupLayout bindGroupLayout = utils::MakeBindGroupLayout( |
| device, {{0, wgpu::ShaderStage::Fragment, wgpu::SamplerBindingType::Filtering}, |
| {1, wgpu::ShaderStage::Fragment, wgpu::TextureSampleType::Float}}); |
| |
| CreateFragmentPipeline(&bindGroupLayout, R"( |
| [[group(0), binding(0)]] var mySampler: sampler; |
| [[group(0), binding(1)]] var myTexture: texture_2d<f32>; |
| [[stage(fragment)]] fn main() { |
| ignore(textureSample(myTexture, mySampler, vec2<f32>(0.0, 0.0))); |
| })"); |
| } |
| |
| // Test that a non-filtering sampler can be used to sample a float texture. |
| { |
| wgpu::BindGroupLayout bindGroupLayout = utils::MakeBindGroupLayout( |
| device, {{0, wgpu::ShaderStage::Fragment, wgpu::SamplerBindingType::NonFiltering}, |
| {1, wgpu::ShaderStage::Fragment, wgpu::TextureSampleType::Float}}); |
| |
| CreateFragmentPipeline(&bindGroupLayout, R"( |
| [[group(0), binding(0)]] var mySampler: sampler; |
| [[group(0), binding(1)]] var myTexture: texture_2d<f32>; |
| [[stage(fragment)]] fn main() { |
| ignore(textureSample(myTexture, mySampler, vec2<f32>(0.0, 0.0))); |
| })"); |
| } |
| |
| // Test that a filtering sampler can be used to sample a depth texture. |
| { |
| wgpu::BindGroupLayout bindGroupLayout = utils::MakeBindGroupLayout( |
| device, {{0, wgpu::ShaderStage::Fragment, wgpu::SamplerBindingType::Filtering}, |
| {1, wgpu::ShaderStage::Fragment, wgpu::TextureSampleType::Depth}}); |
| |
| CreateFragmentPipeline(&bindGroupLayout, R"( |
| [[group(0), binding(0)]] var mySampler: sampler; |
| [[group(0), binding(1)]] var myTexture: texture_depth_2d; |
| [[stage(fragment)]] fn main() { |
| ignore(textureSample(myTexture, mySampler, vec2<f32>(0.0, 0.0))); |
| })"); |
| } |
| |
| // Test that a non-filtering sampler can be used to sample a depth texture. |
| { |
| wgpu::BindGroupLayout bindGroupLayout = utils::MakeBindGroupLayout( |
| device, {{0, wgpu::ShaderStage::Fragment, wgpu::SamplerBindingType::NonFiltering}, |
| {1, wgpu::ShaderStage::Fragment, wgpu::TextureSampleType::Depth}}); |
| |
| CreateFragmentPipeline(&bindGroupLayout, R"( |
| [[group(0), binding(0)]] var mySampler: sampler; |
| [[group(0), binding(1)]] var myTexture: texture_depth_2d; |
| [[stage(fragment)]] fn main() { |
| ignore(textureSample(myTexture, mySampler, vec2<f32>(0.0, 0.0))); |
| })"); |
| } |
| |
| // Test that a comparison sampler can be used to sample a depth texture. |
| { |
| wgpu::BindGroupLayout bindGroupLayout = utils::MakeBindGroupLayout( |
| device, {{0, wgpu::ShaderStage::Fragment, wgpu::SamplerBindingType::Comparison}, |
| {1, wgpu::ShaderStage::Fragment, wgpu::TextureSampleType::Depth}}); |
| |
| CreateFragmentPipeline(&bindGroupLayout, R"( |
| [[group(0), binding(0)]] var mySampler: sampler_comparison; |
| [[group(0), binding(1)]] var myTexture: texture_depth_2d; |
| [[stage(fragment)]] fn main() { |
| ignore(textureSampleCompare(myTexture, mySampler, vec2<f32>(0.0, 0.0), 0.0)); |
| })"); |
| } |
| |
| // Test that a filtering sampler cannot be used to sample an unfilterable-float texture. |
| if (HasToggleEnabled("use_tint_generator")) { |
| wgpu::BindGroupLayout bindGroupLayout = utils::MakeBindGroupLayout( |
| device, {{0, wgpu::ShaderStage::Fragment, wgpu::SamplerBindingType::Filtering}, |
| {1, wgpu::ShaderStage::Fragment, wgpu::TextureSampleType::UnfilterableFloat}}); |
| |
| ASSERT_DEVICE_ERROR(CreateFragmentPipeline(&bindGroupLayout, R"( |
| [[group(0), binding(0)]] var mySampler: sampler; |
| [[group(0), binding(1)]] var myTexture: texture_2d<f32>; |
| [[stage(fragment)]] fn main() { |
| ignore(textureSample(myTexture, mySampler, vec2<f32>(0.0, 0.0))); |
| })")); |
| } |
| |
| // Test that a non-filtering sampler can be used to sample an unfilterable-float texture. |
| { |
| wgpu::BindGroupLayout bindGroupLayout = utils::MakeBindGroupLayout( |
| device, {{0, wgpu::ShaderStage::Fragment, wgpu::SamplerBindingType::NonFiltering}, |
| {1, wgpu::ShaderStage::Fragment, wgpu::TextureSampleType::UnfilterableFloat}}); |
| |
| CreateFragmentPipeline(&bindGroupLayout, R"( |
| [[group(0), binding(0)]] var mySampler: sampler; |
| [[group(0), binding(1)]] var myTexture: texture_2d<f32>; |
| [[stage(fragment)]] fn main() { |
| ignore(textureSample(myTexture, mySampler, vec2<f32>(0.0, 0.0))); |
| })"); |
| } |
| } |
| |
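| // Test that the sampler used in a bind group must be compatible with the sampler binding type |
| // declared in the bind group layout. |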
| TEST_F(SamplerTypeBindingTest, SamplerAndBindGroupMatches) { |
| // Test that a filtering sampler binding works with a normal sampler. |
| { |
| wgpu::BindGroupLayout bindGroupLayout = utils::MakeBindGroupLayout( |
| device, {{0, wgpu::ShaderStage::Fragment, wgpu::SamplerBindingType::Filtering}}); |
| |
| utils::MakeBindGroup(device, bindGroupLayout, {{0, device.CreateSampler()}}); |
| } |
| |
| // Test that a comparison sampler binding works with a sampler that has a compare function. |
| { |
| wgpu::BindGroupLayout bindGroupLayout = utils::MakeBindGroupLayout( |
| device, {{0, wgpu::ShaderStage::Fragment, wgpu::SamplerBindingType::Comparison}}); |
| |
| wgpu::SamplerDescriptor desc = {}; |
| desc.compare = wgpu::CompareFunction::Never; |
| utils::MakeBindGroup(device, bindGroupLayout, {{0, device.CreateSampler(&desc)}}); |
| } |
| |
| // Test that a filtering sampler binding does not work with a sampler that has a compare function. |
| { |
| wgpu::BindGroupLayout bindGroupLayout = utils::MakeBindGroupLayout( |
| device, {{0, wgpu::ShaderStage::Fragment, wgpu::SamplerBindingType::Filtering}}); |
| |
| wgpu::SamplerDescriptor desc; |
| desc.compare = wgpu::CompareFunction::Never; |
| ASSERT_DEVICE_ERROR( |
| utils::MakeBindGroup(device, bindGroupLayout, {{0, device.CreateSampler(&desc)}})); |
| } |
| |
| // Test that a comparison sampler binding does not work with a normal sampler. |
| { |
| wgpu::BindGroupLayout bindGroupLayout = utils::MakeBindGroupLayout( |
| device, {{0, wgpu::ShaderStage::Fragment, wgpu::SamplerBindingType::Comparison}}); |
| |
| wgpu::SamplerDescriptor desc = {}; |
| ASSERT_DEVICE_ERROR( |
| utils::MakeBindGroup(device, bindGroupLayout, {{0, device.CreateSampler(&desc)}})); |
| } |
| |
| // Test that filtering sampler binding works with a filtering or non-filtering sampler. |
| { |
| wgpu::BindGroupLayout bindGroupLayout = utils::MakeBindGroupLayout( |
| device, {{0, wgpu::ShaderStage::Fragment, wgpu::SamplerBindingType::Filtering}}); |
| |
| // Test each filter member |
| { |
| wgpu::SamplerDescriptor desc; |
| desc.minFilter = wgpu::FilterMode::Linear; |
| utils::MakeBindGroup(device, bindGroupLayout, {{0, device.CreateSampler(&desc)}}); |
| } |
| { |
| wgpu::SamplerDescriptor desc; |
| desc.magFilter = wgpu::FilterMode::Linear; |
| utils::MakeBindGroup(device, bindGroupLayout, {{0, device.CreateSampler(&desc)}}); |
| } |
| { |
| wgpu::SamplerDescriptor desc; |
| desc.mipmapFilter = wgpu::FilterMode::Linear; |
| utils::MakeBindGroup(device, bindGroupLayout, {{0, device.CreateSampler(&desc)}}); |
| } |
| |
| // Test non-filtering sampler |
| utils::MakeBindGroup(device, bindGroupLayout, {{0, device.CreateSampler()}}); |
| } |
| |
| // Test that non-filtering sampler binding does not work with a filtering sampler. |
| { |
| wgpu::BindGroupLayout bindGroupLayout = utils::MakeBindGroupLayout( |
| device, {{0, wgpu::ShaderStage::Fragment, wgpu::SamplerBindingType::NonFiltering}}); |
| |
| // Test each filter member |
| { |
| wgpu::SamplerDescriptor desc; |
| desc.minFilter = wgpu::FilterMode::Linear; |
| ASSERT_DEVICE_ERROR( |
| utils::MakeBindGroup(device, bindGroupLayout, {{0, device.CreateSampler(&desc)}})); |
| } |
| { |
| wgpu::SamplerDescriptor desc; |
| desc.magFilter = wgpu::FilterMode::Linear; |
| ASSERT_DEVICE_ERROR( |
| utils::MakeBindGroup(device, bindGroupLayout, {{0, device.CreateSampler(&desc)}})); |
| } |
| { |
| wgpu::SamplerDescriptor desc; |
| desc.mipmapFilter = wgpu::FilterMode::Linear; |
| ASSERT_DEVICE_ERROR( |
| utils::MakeBindGroup(device, bindGroupLayout, {{0, device.CreateSampler(&desc)}})); |
| } |
| |
| // Test non-filtering sampler |
| utils::MakeBindGroup(device, bindGroupLayout, {{0, device.CreateSampler()}}); |
| } |
| } |