Reuse BGL validation in default layout computations.

Instead of calling individual validation functions directly in
PipelineLayoutBase::CreateDefault, reuse ValidateBindGroupLayoutDescriptor and
ValidatePipelineLayoutDescriptor.

Also makes the visibility of the default layout match the aggregation
described in the WebGPU spec.

Also makes refcounting of BGLs a bit less manual at the bottom of
CreateDefault.

Also adds tests for minBufferBindingSize and visibility aggregation in
the default layout computations.
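
A minimal standalone sketch (not the actual Dawn code; the EntrySketch type,
field set, and stage bitmask values below are simplified stand-ins) of the
aggregation rule applied when the same binding is declared in several shader
stages: minBufferBindingSize takes the maximum across stages and visibility
takes the OR of the stages, while every other field must match exactly:

    #include <algorithm>
    #include <cassert>
    #include <cstdint>

    // Hypothetical, simplified stand-ins for the real WebGPU/Dawn types.
    enum class BindingType { UniformBuffer, StorageBuffer, Sampler };
    using ShaderStage = uint32_t;  // Bitmask: 1 = Vertex, 2 = Fragment, 4 = Compute.

    struct EntrySketch {
        uint32_t binding;
        BindingType type;
        ShaderStage visibility;
        bool hasDynamicOffset;
        uint64_t minBufferBindingSize;
    };

    // Merge a declaration of the same binding seen in another stage into the
    // entry deduced so far. Returns false if the declarations are incompatible.
    bool MergeEntry(EntrySketch* existing, const EntrySketch& incoming) {
        // visibility and minBufferBindingSize are excluded from the compatibility
        // check because they are aggregated below.
        if (existing->binding != incoming.binding || existing->type != incoming.type ||
            existing->hasDynamicOffset != incoming.hasDynamicOffset) {
            return false;
        }
        existing->minBufferBindingSize =
            std::max(existing->minBufferBindingSize, incoming.minBufferBindingSize);
        existing->visibility |= incoming.visibility;
        return true;
    }

    int main() {
        EntrySketch fromVertex = {0, BindingType::UniformBuffer, /*Vertex*/ 1, false, 4};
        EntrySketch fromFragment = {0, BindingType::UniformBuffer, /*Fragment*/ 2, false, 64};
        assert(MergeEntry(&fromVertex, fromFragment));
        assert(fromVertex.visibility == 3);             // Vertex | Fragment
        assert(fromVertex.minBufferBindingSize == 64);  // max(4, 64)
        return 0;
    }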

Bug: dawn:527
Change-Id: I6bbd5f3de8b235dddf6cbd2bedfd34a094fcb277
Reviewed-on: https://dawn-review.googlesource.com/c/dawn/+/28560
Commit-Queue: Corentin Wallez <cwallez@chromium.org>
Reviewed-by: Stephen White <senorblanco@chromium.org>
diff --git a/src/dawn_native/BindGroupLayout.h b/src/dawn_native/BindGroupLayout.h
index 958abf6..06a7653 100644
--- a/src/dawn_native/BindGroupLayout.h
+++ b/src/dawn_native/BindGroupLayout.h
@@ -32,23 +32,9 @@
 
 namespace dawn_native {
 
-    MaybeError ValidateBindGroupLayoutDescriptor(DeviceBase*,
+    MaybeError ValidateBindGroupLayoutDescriptor(DeviceBase* device,
                                                  const BindGroupLayoutDescriptor* descriptor);
 
-    MaybeError ValidateBindingTypeWithShaderStageVisibility(
-        wgpu::BindingType bindingType,
-        wgpu::ShaderStage shaderStageVisibility);
-
-    MaybeError ValidateStorageTextureFormat(DeviceBase* device,
-                                            wgpu::BindingType bindingType,
-                                            wgpu::TextureFormat storageTextureFormat);
-
-    MaybeError ValidateStorageTextureViewDimension(wgpu::BindingType bindingType,
-                                                   wgpu::TextureViewDimension dimension);
-
-    MaybeError ValidateBindingCanBeMultisampled(wgpu::BindingType bindingType,
-                                                wgpu::TextureViewDimension viewDimension);
-
     // Bindings are specified as a |BindingNumber| in the BindGroupLayoutDescriptor.
     // These numbers may be arbitrary and sparse. Internally, Dawn packs these numbers
     // into a packed range of |BindingIndex| integers.
diff --git a/src/dawn_native/BindingInfo.cpp b/src/dawn_native/BindingInfo.cpp
index afc2590..3343456 100644
--- a/src/dawn_native/BindingInfo.cpp
+++ b/src/dawn_native/BindingInfo.cpp
@@ -57,10 +57,6 @@
             case wgpu::BindingType::WriteonlyStorageTexture:
                 perStageBindingCountMember = &PerStageBindingCounts::storageTextureCount;
                 break;
-
-            default:
-                UNREACHABLE();
-                break;
         }
 
         ASSERT(perStageBindingCountMember != nullptr);
diff --git a/src/dawn_native/PipelineLayout.cpp b/src/dawn_native/PipelineLayout.cpp
index 5392d56..6b0c598 100644
--- a/src/dawn_native/PipelineLayout.cpp
+++ b/src/dawn_native/PipelineLayout.cpp
@@ -28,33 +28,14 @@
 
         bool InferredBindGroupLayoutEntriesCompatible(const BindGroupLayoutEntry& lhs,
                                                       const BindGroupLayoutEntry& rhs) {
-            // Minimum buffer binding size excluded because we take the maximum seen across stages
-            return lhs.binding == rhs.binding && lhs.visibility == rhs.visibility &&
-                   lhs.type == rhs.type && lhs.hasDynamicOffset == rhs.hasDynamicOffset &&
+            // Minimum buffer binding size excluded because we take the maximum seen across stages.
+            // Visibility is excluded because we take the OR across stages.
+            return lhs.binding == rhs.binding && lhs.type == rhs.type &&
+                   lhs.hasDynamicOffset == rhs.hasDynamicOffset &&
                    lhs.multisampled == rhs.multisampled && lhs.viewDimension == rhs.viewDimension &&
                    lhs.textureComponentType == rhs.textureComponentType;
         }
 
-        wgpu::ShaderStage GetShaderStageVisibilityWithBindingType(wgpu::BindingType bindingType) {
-            // TODO(jiawei.shao@intel.com): support read-only and read-write storage textures.
-            switch (bindingType) {
-                case wgpu::BindingType::StorageBuffer:
-                case wgpu::BindingType::WriteonlyStorageTexture:
-                    return wgpu::ShaderStage::Fragment | wgpu::ShaderStage::Compute;
-
-                case wgpu::BindingType::UniformBuffer:
-                case wgpu::BindingType::ReadonlyStorageBuffer:
-                case wgpu::BindingType::Sampler:
-                case wgpu::BindingType::ComparisonSampler:
-                case wgpu::BindingType::SampledTexture:
-                case wgpu::BindingType::ReadonlyStorageTexture:
-                    return wgpu::ShaderStage::Vertex | wgpu::ShaderStage::Fragment |
-                           wgpu::ShaderStage::Compute;
-            }
-
-            return {};
-        }
-
     }  // anonymous namespace
 
     MaybeError ValidatePipelineLayoutDescriptor(DeviceBase* device,
@@ -142,22 +123,7 @@
 
                     BindGroupLayoutEntry bindingSlot;
                     bindingSlot.binding = static_cast<uint32_t>(bindingNumber);
-
-                    DAWN_TRY(ValidateBindingTypeWithShaderStageVisibility(bindingInfo.type,
-                                                                          StageBit(shaderStage)));
-                    DAWN_TRY(ValidateStorageTextureFormat(device, bindingInfo.type,
-                                                          bindingInfo.storageTextureFormat));
-                    DAWN_TRY(ValidateStorageTextureViewDimension(bindingInfo.type,
-                                                                 bindingInfo.viewDimension));
-
-                    if (bindingInfo.multisampled) {
-                        DAWN_TRY(ValidateBindingCanBeMultisampled(bindingInfo.type,
-                                                                  bindingInfo.viewDimension));
-                    }
-
-                    bindingSlot.visibility =
-                        GetShaderStageVisibilityWithBindingType(bindingInfo.type);
-
+                    bindingSlot.visibility = StageBit(shaderStage);
                     bindingSlot.type = bindingInfo.type;
                     bindingSlot.hasDynamicOffset = false;
                     bindingSlot.multisampled = bindingInfo.multisampled;
@@ -181,11 +147,14 @@
                                     "not compatible with previous declaration");
                             }
 
-                            // Use the max |minBufferBindingSize| we find
+                            // Use the max |minBufferBindingSize| we find.
                             existingEntry->minBufferBindingSize =
                                 std::max(existingEntry->minBufferBindingSize,
                                          bindingSlot.minBufferBindingSize);
 
+                            // Use the OR of all the stages at which we find this binding.
+                            existingEntry->visibility |= bindingSlot.visibility;
+
                             // Already used slot, continue
                             continue;
                         }
@@ -206,36 +175,38 @@
             }
         }
 
-        DAWN_TRY(ValidateBindingCounts(bindingCounts));
-
-        ityp::array<BindGroupIndex, BindGroupLayoutBase*, kMaxBindGroups> bindGroupLayouts = {};
+        // Create the deduced BGLs, validating each deduced descriptor as we go.
+        ityp::array<BindGroupIndex, Ref<BindGroupLayoutBase>, kMaxBindGroups> bindGroupLayouts = {};
         for (BindGroupIndex group(0); group < bindGroupLayoutCount; ++group) {
             BindGroupLayoutDescriptor desc = {};
             desc.entries = entryData[group].data();
             desc.entryCount = static_cast<uint32_t>(entryCounts[group]);
 
-            // We should never produce a bad descriptor.
-            ASSERT(!ValidateBindGroupLayoutDescriptor(device, &desc).IsError());
+            DAWN_TRY(ValidateBindGroupLayoutDescriptor(device, &desc));
+            DAWN_TRY_ASSIGN(bindGroupLayouts[group], device->GetOrCreateBindGroupLayout(&desc));
 
-            Ref<BindGroupLayoutBase> bgl;
-            DAWN_TRY_ASSIGN(bgl, device->GetOrCreateBindGroupLayout(&desc));
-            bindGroupLayouts[group] = bgl.Detach();
+            ASSERT(!bindGroupLayouts[group]->IsError());
         }
 
-        PipelineLayoutDescriptor desc = {};
-        desc.bindGroupLayouts = bindGroupLayouts.data();
-        desc.bindGroupLayoutCount = static_cast<uint32_t>(bindGroupLayoutCount);
-        PipelineLayoutBase* pipelineLayout = device->CreatePipelineLayout(&desc);
-        ASSERT(!pipelineLayout->IsError());
-
-        // These bind group layouts are created internally and referenced by the pipeline layout.
-        // Release the external refcount.
-        for (BindGroupIndex group(0); group < bindGroupLayoutCount; ++group) {
-            if (bindGroupLayouts[group] != nullptr) {
-                bindGroupLayouts[group]->Release();
+        // Create the deduced pipeline layout, validating its descriptor first.
+        PipelineLayoutBase* pipelineLayout = nullptr;
+        {
+            ityp::array<BindGroupIndex, BindGroupLayoutBase*, kMaxBindGroups> bgls = {};
+            for (BindGroupIndex group(0); group < bindGroupLayoutCount; ++group) {
+                bgls[group] = bindGroupLayouts[group].Get();
             }
+
+            PipelineLayoutDescriptor desc = {};
+            desc.bindGroupLayouts = bgls.data();
+            desc.bindGroupLayoutCount = static_cast<uint32_t>(bindGroupLayoutCount);
+
+            DAWN_TRY(ValidatePipelineLayoutDescriptor(device, &desc));
+            DAWN_TRY_ASSIGN(pipelineLayout, device->GetOrCreatePipelineLayout(&desc));
+
+            ASSERT(!pipelineLayout->IsError());
         }
 
+        // Sanity check in debug that the pipeline layout is compatible with the current pipeline.
         for (const StageAndDescriptor& stage : stages) {
             const EntryPointMetadata& metadata =
                 stage.second->module->GetEntryPoint(stage.second->entryPoint, stage.first);
diff --git a/src/tests/unittests/validation/GetBindGroupLayoutValidationTests.cpp b/src/tests/unittests/validation/GetBindGroupLayoutValidationTests.cpp
index 561a86e..7fe186c 100644
--- a/src/tests/unittests/validation/GetBindGroupLayoutValidationTests.cpp
+++ b/src/tests/unittests/validation/GetBindGroupLayoutValidationTests.cpp
@@ -19,9 +19,6 @@
 
 class GetBindGroupLayoutTests : public ValidationTest {
   protected:
-    static constexpr wgpu::ShaderStage kVisibilityAll =
-        wgpu::ShaderStage::Compute | wgpu::ShaderStage::Fragment | wgpu::ShaderStage::Vertex;
-
     wgpu::RenderPipeline RenderPipelineFromFragmentShader(const char* shader) {
         wgpu::ShaderModule vsModule =
             utils::CreateShaderModule(device, utils::SingleShaderStage::Vertex, R"(
@@ -78,19 +75,21 @@
 
     wgpu::RenderPipeline pipeline = device.CreateRenderPipeline(&descriptor);
 
+    // The same value is returned for the same index.
     EXPECT_EQ(pipeline.GetBindGroupLayout(0).Get(), pipeline.GetBindGroupLayout(0).Get());
 
-    EXPECT_EQ(pipeline.GetBindGroupLayout(1).Get(), pipeline.GetBindGroupLayout(1).Get());
-
+    // Matching bind group layouts at different indices are the same object.
     EXPECT_EQ(pipeline.GetBindGroupLayout(0).Get(), pipeline.GetBindGroupLayout(1).Get());
 
-    EXPECT_EQ(pipeline.GetBindGroupLayout(0).Get(), pipeline.GetBindGroupLayout(2).Get());
+    // BGLs with different binding types are different objects.
+    EXPECT_NE(pipeline.GetBindGroupLayout(2).Get(), pipeline.GetBindGroupLayout(3).Get());
 
-    EXPECT_NE(pipeline.GetBindGroupLayout(0).Get(), pipeline.GetBindGroupLayout(3).Get());
+    // BGLs with different visibilities are different objects.
+    EXPECT_NE(pipeline.GetBindGroupLayout(0).Get(), pipeline.GetBindGroupLayout(2).Get());
 }
 
 // Test that getBindGroupLayout defaults are correct
-// - shader stage visibility is All
+// - shader stage visibility is the set of stages that use the binding.
 // - dynamic offsets is false
 TEST_F(GetBindGroupLayoutTests, DefaultShaderStageAndDynamicOffsets) {
     wgpu::RenderPipeline pipeline = RenderPipelineFromFragmentShader(R"(
@@ -114,22 +113,19 @@
 
     // Check that visibility and dynamic offsets match
     binding.hasDynamicOffset = false;
-    binding.visibility = kVisibilityAll;
+    binding.visibility = wgpu::ShaderStage::Fragment;
     EXPECT_EQ(device.CreateBindGroupLayout(&desc).Get(), pipeline.GetBindGroupLayout(0).Get());
 
     // Check that any change in visibility doesn't match.
     binding.visibility = wgpu::ShaderStage::Vertex;
     EXPECT_NE(device.CreateBindGroupLayout(&desc).Get(), pipeline.GetBindGroupLayout(0).Get());
 
-    binding.visibility = wgpu::ShaderStage::Fragment;
-    EXPECT_NE(device.CreateBindGroupLayout(&desc).Get(), pipeline.GetBindGroupLayout(0).Get());
-
     binding.visibility = wgpu::ShaderStage::Compute;
     EXPECT_NE(device.CreateBindGroupLayout(&desc).Get(), pipeline.GetBindGroupLayout(0).Get());
 
     // Check that any change in hasDynamicOffsets doesn't match.
     binding.hasDynamicOffset = true;
-    binding.visibility = kVisibilityAll;
+    binding.visibility = wgpu::ShaderStage::Fragment;
     EXPECT_NE(device.CreateBindGroupLayout(&desc).Get(), pipeline.GetBindGroupLayout(0).Get());
 }
 
@@ -154,7 +150,7 @@
     wgpu::BindGroupLayoutEntry binding = {};
     binding.binding = 0;
     binding.type = wgpu::BindingType::UniformBuffer;
-    binding.visibility = kVisibilityAll;
+    binding.visibility = wgpu::ShaderStage::Compute;
     binding.hasDynamicOffset = false;
     binding.minBufferBindingSize = 4 * sizeof(float);
 
@@ -172,6 +168,7 @@
     binding.hasDynamicOffset = false;
     binding.multisampled = false;
     binding.minBufferBindingSize = 4 * sizeof(float);
+    binding.visibility = wgpu::ShaderStage::Fragment;
 
     wgpu::BindGroupLayoutDescriptor desc = {};
     desc.entryCount = 1;
@@ -179,7 +176,7 @@
 
     {
         // Storage buffer binding is not supported in vertex shader.
-        binding.visibility = wgpu::ShaderStage::Compute | wgpu::ShaderStage::Fragment;
+        binding.visibility = wgpu::ShaderStage::Fragment;
         binding.type = wgpu::BindingType::StorageBuffer;
         wgpu::RenderPipeline pipeline = RenderPipelineFromFragmentShader(R"(
         #version 450
@@ -190,8 +187,6 @@
         void main() {})");
         EXPECT_EQ(device.CreateBindGroupLayout(&desc).Get(), pipeline.GetBindGroupLayout(0).Get());
     }
-
-    binding.visibility = kVisibilityAll;
     {
         binding.type = wgpu::BindingType::UniformBuffer;
         wgpu::RenderPipeline pipeline = RenderPipelineFromFragmentShader(R"(
@@ -243,7 +238,7 @@
     wgpu::BindGroupLayoutEntry binding = {};
     binding.binding = 0;
     binding.type = wgpu::BindingType::SampledTexture;
-    binding.visibility = kVisibilityAll;
+    binding.visibility = wgpu::ShaderStage::Fragment;
     binding.hasDynamicOffset = false;
 
     wgpu::BindGroupLayoutDescriptor desc = {};
@@ -276,7 +271,7 @@
     wgpu::BindGroupLayoutEntry binding = {};
     binding.binding = 0;
     binding.type = wgpu::BindingType::SampledTexture;
-    binding.visibility = kVisibilityAll;
+    binding.visibility = wgpu::ShaderStage::Fragment;
     binding.hasDynamicOffset = false;
     binding.multisampled = false;
 
@@ -350,7 +345,7 @@
     wgpu::BindGroupLayoutEntry binding = {};
     binding.binding = 0;
     binding.type = wgpu::BindingType::SampledTexture;
-    binding.visibility = kVisibilityAll;
+    binding.visibility = wgpu::ShaderStage::Fragment;
     binding.hasDynamicOffset = false;
     binding.multisampled = false;
 
@@ -393,7 +388,7 @@
 TEST_F(GetBindGroupLayoutTests, BindingIndices) {
     wgpu::BindGroupLayoutEntry binding = {};
     binding.type = wgpu::BindingType::UniformBuffer;
-    binding.visibility = kVisibilityAll;
+    binding.visibility = wgpu::ShaderStage::Fragment;
     binding.hasDynamicOffset = false;
     binding.multisampled = false;
     binding.minBufferBindingSize = 4 * sizeof(float);
@@ -471,6 +466,152 @@
     device.CreateRenderPipeline(&descriptor);
 }
 
+// Test that minBufferBindingSize is set on the BGL and that the max of the min buffer sizes is used.
+TEST_F(GetBindGroupLayoutTests, MinBufferSize) {
+    wgpu::ShaderModule vsModule4 =
+        utils::CreateShaderModule(device, utils::SingleShaderStage::Vertex, R"(
+        #version 450
+        layout(set = 0, binding = 0) uniform UniformBuffer {
+            float pos;
+        };
+        void main() {})");
+
+    wgpu::ShaderModule vsModule64 =
+        utils::CreateShaderModule(device, utils::SingleShaderStage::Vertex, R"(
+        #version 450
+        layout(set = 0, binding = 0) uniform UniformBuffer1 {
+            mat4 pos;
+        };
+        void main() {})");
+
+    wgpu::ShaderModule fsModule4 =
+        utils::CreateShaderModule(device, utils::SingleShaderStage::Fragment, R"(
+        #version 450
+        layout(set = 0, binding = 0) uniform UniformBuffer {
+            float pos;
+        };
+
+        void main() {})");
+
+    wgpu::ShaderModule fsModule64 =
+        utils::CreateShaderModule(device, utils::SingleShaderStage::Fragment, R"(
+        #version 450
+        layout(set = 0, binding = 0) uniform UniformBuffer {
+            mat4 pos;
+        };
+
+        void main() {})");
+
+    // Create BGLs with minBufferBindingSize 4 and 64.
+    wgpu::BindGroupLayoutEntry binding = {};
+    binding.binding = 0;
+    binding.type = wgpu::BindingType::UniformBuffer;
+    binding.visibility = wgpu::ShaderStage::Fragment | wgpu::ShaderStage::Vertex;
+
+    wgpu::BindGroupLayoutDescriptor desc = {};
+    desc.entryCount = 1;
+    desc.entries = &binding;
+
+    binding.minBufferBindingSize = 4;
+    wgpu::BindGroupLayout bgl4 = device.CreateBindGroupLayout(&desc);
+    binding.minBufferBindingSize = 64;
+    wgpu::BindGroupLayout bgl64 = device.CreateBindGroupLayout(&desc);
+
+    utils::ComboRenderPipelineDescriptor descriptor(device);
+    descriptor.layout = nullptr;
+
+    // Check with both stages using 4 bytes.
+    {
+        descriptor.vertexStage.module = vsModule4;
+        descriptor.cFragmentStage.module = fsModule4;
+        wgpu::RenderPipeline pipeline = device.CreateRenderPipeline(&descriptor);
+        EXPECT_EQ(pipeline.GetBindGroupLayout(0).Get(), bgl4.Get());
+    }
+
+    // Check that the max is taken between 4 and 64.
+    {
+        descriptor.vertexStage.module = vsModule64;
+        descriptor.cFragmentStage.module = fsModule4;
+        wgpu::RenderPipeline pipeline = device.CreateRenderPipeline(&descriptor);
+        EXPECT_EQ(pipeline.GetBindGroupLayout(0).Get(), bgl64.Get());
+    }
+
+    // Check that swapping which stage uses which size doesn't change the result; the max is still taken.
+    {
+        descriptor.vertexStage.module = vsModule4;
+        descriptor.cFragmentStage.module = fsModule64;
+        wgpu::RenderPipeline pipeline = device.CreateRenderPipeline(&descriptor);
+        EXPECT_EQ(pipeline.GetBindGroupLayout(0).Get(), bgl64.Get());
+    }
+}
+
+// Test that the visibility is correctly aggregated if two stages have the exact same binding.
+TEST_F(GetBindGroupLayoutTests, StageAggregation) {
+    wgpu::ShaderModule vsModuleNoSampler =
+        utils::CreateShaderModule(device, utils::SingleShaderStage::Vertex, R"(
+        #version 450
+        void main() {})");
+
+    wgpu::ShaderModule vsModuleSampler =
+        utils::CreateShaderModule(device, utils::SingleShaderStage::Vertex, R"(
+        #version 450
+        layout(set = 0, binding = 0) uniform sampler mySampler;
+        void main() {})");
+
+    wgpu::ShaderModule fsModuleNoSampler =
+        utils::CreateShaderModule(device, utils::SingleShaderStage::Fragment, R"(
+        #version 450
+        void main() {})");
+
+    wgpu::ShaderModule fsModuleSampler =
+        utils::CreateShaderModule(device, utils::SingleShaderStage::Fragment, R"(
+        #version 450
+        layout(set = 0, binding = 0) uniform sampler mySampler;
+        void main() {})");
+
+    // Set up a sampler binding; the expected visibility is set in each sub-test below.
+    wgpu::BindGroupLayoutEntry binding = {};
+    binding.binding = 0;
+    binding.type = wgpu::BindingType::Sampler;
+
+    wgpu::BindGroupLayoutDescriptor desc = {};
+    desc.entryCount = 1;
+    desc.entries = &binding;
+
+    utils::ComboRenderPipelineDescriptor descriptor(device);
+    descriptor.layout = nullptr;
+
+    // Check with only the vertex shader using the sampler
+    {
+        descriptor.vertexStage.module = vsModuleSampler;
+        descriptor.cFragmentStage.module = fsModuleNoSampler;
+        wgpu::RenderPipeline pipeline = device.CreateRenderPipeline(&descriptor);
+
+        binding.visibility = wgpu::ShaderStage::Vertex;
+        EXPECT_EQ(pipeline.GetBindGroupLayout(0).Get(), device.CreateBindGroupLayout(&desc).Get());
+    }
+
+    // Check with only the fragment shader using the sampler
+    {
+        descriptor.vertexStage.module = vsModuleNoSampler;
+        descriptor.cFragmentStage.module = fsModuleSampler;
+        wgpu::RenderPipeline pipeline = device.CreateRenderPipeline(&descriptor);
+
+        binding.visibility = wgpu::ShaderStage::Fragment;
+        EXPECT_EQ(pipeline.GetBindGroupLayout(0).Get(), device.CreateBindGroupLayout(&desc).Get());
+    }
+
+    // Check with both shaders using the sampler
+    {
+        descriptor.vertexStage.module = vsModuleSampler;
+        descriptor.cFragmentStage.module = fsModuleSampler;
+        wgpu::RenderPipeline pipeline = device.CreateRenderPipeline(&descriptor);
+
+        binding.visibility = wgpu::ShaderStage::Fragment | wgpu::ShaderStage::Vertex;
+        EXPECT_EQ(pipeline.GetBindGroupLayout(0).Get(), device.CreateBindGroupLayout(&desc).Get());
+    }
+}
+
 // Test it is invalid to have conflicting binding types in the shaders.
 TEST_F(GetBindGroupLayoutTests, ConflictingBindingType) {
     wgpu::ShaderModule vsModule =
@@ -500,8 +641,7 @@
 }
 
 // Test it is invalid to have conflicting binding texture multisampling in the shaders.
-// TODO: Support multisampling
-TEST_F(GetBindGroupLayoutTests, DISABLED_ConflictingBindingTextureMultisampling) {
+TEST_F(GetBindGroupLayoutTests, ConflictingBindingTextureMultisampling) {
     wgpu::ShaderModule vsModule =
         utils::CreateShaderModule(device, utils::SingleShaderStage::Vertex, R"(
         #version 450