Vulkan: Support creating pipeline layout with null bind group layout
This patch adds support for creating a pipeline layout with a null
bind group layout. Since Vulkan does not allow creating a
`VkPipelineLayout` with a null `VkDescriptorSetLayout`, an empty bind
group layout is substituted internally wherever a `nullptr` bind group
layout appears in the `PipelineLayoutDescriptor`.
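
For context, a minimal sketch of the behavior this enables at the API
level, assuming the generated WebGPU C++ bindings (`webgpu/webgpu_cpp.h`)
and an already-created `device`; the function and parameter names below
are illustrative only:

```cpp
#include <array>
#include <webgpu/webgpu_cpp.h>

// Build a pipeline layout whose @group(0) slot is left null. With this patch
// the Vulkan backend fills the hole with the device's internal empty bind
// group layout instead of producing an invalid VkPipelineLayout.
wgpu::PipelineLayout MakeLayoutWithNullGroup0(const wgpu::Device& device,
                                              const wgpu::BindGroupLayout& group1Layout) {
    // bgls[0] stays default-initialized (null); only @group(1) has a real layout.
    std::array<wgpu::BindGroupLayout, 2> bgls = {};
    bgls[1] = group1Layout;

    wgpu::PipelineLayoutDescriptor desc = {};
    desc.bindGroupLayoutCount = bgls.size();
    desc.bindGroupLayouts = bgls.data();
    return device.CreatePipelineLayout(&desc);
}
```
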
Bug: chromium:377836524, chromium:42241530
Test: dawn_end2end_tests
Change-Id: I55ec1990594fff58b416159130daea0baa637c21
Reviewed-on: https://dawn-review.googlesource.com/c/dawn/+/214814
Reviewed-by: Corentin Wallez <cwallez@chromium.org>
Reviewed-by: Loko Kung <lokokung@google.com>
Commit-Queue: Jiawei Shao <jiawei.shao@intel.com>
diff --git a/src/dawn/native/vulkan/PipelineLayoutVk.cpp b/src/dawn/native/vulkan/PipelineLayoutVk.cpp
index db549c4..58c97dd 100644
--- a/src/dawn/native/vulkan/PipelineLayoutVk.cpp
+++ b/src/dawn/native/vulkan/PipelineLayoutVk.cpp
@@ -31,6 +31,8 @@
#include <utility>
#include "dawn/common/BitSetIterator.h"
+#include "dawn/common/Range.h"
+#include "dawn/common/ityp_bitset.h"
#include "dawn/native/vulkan/BindGroupLayoutVk.h"
#include "dawn/native/vulkan/DeviceVk.h"
#include "dawn/native/vulkan/FencedDeleter.h"
@@ -51,21 +53,24 @@
ResultOrError<Ref<RefCountedVkHandle<VkPipelineLayout>>> PipelineLayout::CreateVkPipelineLayout(
uint32_t internalImmediateDataSize) {
// Compute the array of VkDescriptorSetLayouts that will be chained in the create info.
- // TODO(crbug.com/dawn/277) Vulkan doesn't allow holes in this array, should we expose
- // this constraints at the Dawn level?
- uint32_t numSetLayouts = 0;
- std::array<VkDescriptorSetLayout, kMaxBindGroups> setLayouts;
- for (BindGroupIndex setIndex : IterateBitSet(GetBindGroupLayoutsMask())) {
- const BindGroupLayoutInternalBase* bindGroupLayout = GetBindGroupLayout(setIndex);
- setLayouts[numSetLayouts] = ToBackend(bindGroupLayout)->GetHandle();
- numSetLayouts++;
+ BindGroupMask bindGroupMask = GetBindGroupLayoutsMask();
+ BindGroupIndex highestBindGroupIndex = GetHighestBitIndexPlusOne(bindGroupMask);
+ PerBindGroup<VkDescriptorSetLayout> setLayouts;
+ for (BindGroupIndex i : Range(highestBindGroupIndex)) {
+ if (bindGroupMask[i]) {
+ setLayouts[i] = ToBackend(GetBindGroupLayout(i))->GetHandle();
+ } else {
+ setLayouts[i] =
+ ToBackend(GetDevice()->GetEmptyBindGroupLayout()->GetInternalBindGroupLayout())
+ ->GetHandle();
+ }
}
VkPipelineLayoutCreateInfo createInfo;
createInfo.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO;
createInfo.pNext = nullptr;
createInfo.flags = 0;
- createInfo.setLayoutCount = numSetLayouts;
+ createInfo.setLayoutCount = static_cast<uint32_t>(highestBindGroupIndex);
createInfo.pSetLayouts = AsVkArray(setLayouts.data());
createInfo.pushConstantRangeCount = 0;
createInfo.pPushConstantRanges = nullptr;
@@ -96,18 +101,23 @@
}
MaybeError PipelineLayout::Initialize() {
- uint32_t numSetLayouts = 0;
- std::array<const CachedObject*, kMaxBindGroups> cachedObjects;
- for (BindGroupIndex setIndex : IterateBitSet(GetBindGroupLayoutsMask())) {
- const BindGroupLayoutInternalBase* bindGroupLayout = GetBindGroupLayout(setIndex);
- cachedObjects[numSetLayouts] = bindGroupLayout;
- numSetLayouts++;
+ BindGroupMask bindGroupMask = GetBindGroupLayoutsMask();
+ BindGroupIndex highestBindGroupIndex = GetHighestBitIndexPlusOne(bindGroupMask);
+ PerBindGroup<const CachedObject*> cachedObjects;
+ for (BindGroupIndex i : Range(highestBindGroupIndex)) {
+ if (bindGroupMask[i]) {
+ cachedObjects[i] = GetBindGroupLayout(i);
+ } else {
+ cachedObjects[i] = GetDevice()->GetEmptyBindGroupLayout()->GetInternalBindGroupLayout();
+ }
}
// Record bind group layout objects and user immediate data size into pipeline layout cache key.
// It represents pipeline layout base attributes and ignored future changes caused by internal
// immediate data size from pipeline.
- StreamIn(&mCacheKey, stream::Iterable(cachedObjects.data(), numSetLayouts),
+ uint32_t numSetLayoutsWithHoles =
+ static_cast<uint32_t>(GetHighestBitIndexPlusOne(bindGroupMask));
+ StreamIn(&mCacheKey, stream::Iterable(cachedObjects.data(), numSetLayoutsWithHoles),
GetImmediateDataRangeByteSize());
return {};
diff --git a/src/dawn/tests/end2end/PipelineLayoutTests.cpp b/src/dawn/tests/end2end/PipelineLayoutTests.cpp
index 70c0fc4..4af0d2f 100644
--- a/src/dawn/tests/end2end/PipelineLayoutTests.cpp
+++ b/src/dawn/tests/end2end/PipelineLayoutTests.cpp
@@ -156,6 +156,73 @@
queue.Submit(1, &commands);
}
+// Test that creating a PipelineLayout with both null and non-null bind group layouts works
+// correctly.
+TEST_P(PipelineLayoutTests, PipelineLayoutCreatedWithNullBindGroupLayout) {
+ for (uint32_t nonEmptyGroupIndex = 0; nonEmptyGroupIndex <= 1; ++nonEmptyGroupIndex) {
+ std::ostringstream stream;
+ stream << "@group(" << nonEmptyGroupIndex << R"()
+ @binding(0) var<storage, read> inputData : u32;
+ @group(2) @binding(0) var<storage, read_write> outputData : u32;
+ @compute @workgroup_size(1, 1)
+ fn main() {
+ outputData = inputData;
+ }
+ )";
+
+ wgpu::ShaderModule shaderModule = utils::CreateShaderModule(device, stream.str());
+
+ // Create 3 bind group layouts with a null bind group layout.
+ std::array<wgpu::BindGroupLayout, 3> bgls = {};
+ bgls[nonEmptyGroupIndex] = utils::MakeBindGroupLayout(
+ device, {{0, wgpu::ShaderStage::Compute, wgpu::BufferBindingType::ReadOnlyStorage}});
+ bgls[2] = utils::MakeBindGroupLayout(
+ device, {{0, wgpu::ShaderStage::Compute, wgpu::BufferBindingType::Storage}});
+
+ // Create pipeline layout with the array of bind group layouts `bgls`.
+ wgpu::PipelineLayoutDescriptor pipelineLayoutDescriptor = {};
+ pipelineLayoutDescriptor.bindGroupLayoutCount = bgls.size();
+ pipelineLayoutDescriptor.bindGroupLayouts = bgls.data();
+ wgpu::PipelineLayout pipelineLayout =
+ device.CreatePipelineLayout(&pipelineLayoutDescriptor);
+
+ wgpu::ComputePipelineDescriptor computePipelineDescriptor = {};
+ computePipelineDescriptor.compute.module = shaderModule;
+ computePipelineDescriptor.layout = pipelineLayout;
+ wgpu::ComputePipeline computePipeline =
+ device.CreateComputePipeline(&computePipelineDescriptor);
+
+ // Create and set 3 bind groups for the test. Only 2 of the 3 bind groups should be accessed
+ // inside the compute pipeline.
+ bgls[1 - nonEmptyGroupIndex] = utils::MakeBindGroupLayout(
+ device, {{0, wgpu::ShaderStage::Compute, wgpu::BufferBindingType::ReadOnlyStorage}});
+ wgpu::Buffer buffer0 =
+ utils::CreateBufferFromData(device, wgpu::BufferUsage::Storage, {1u});
+ wgpu::Buffer buffer1 =
+ utils::CreateBufferFromData(device, wgpu::BufferUsage::Storage, {2u});
+ wgpu::BufferDescriptor bufferDescriptor = {};
+ bufferDescriptor.usage = wgpu::BufferUsage::Storage | wgpu::BufferUsage::CopySrc;
+ bufferDescriptor.size = 4u;
+ wgpu::Buffer buffer2 = device.CreateBuffer(&bufferDescriptor);
+ wgpu::BindGroup bg0 = utils::MakeBindGroup(device, bgls[0], {{0, buffer0}});
+ wgpu::BindGroup bg1 = utils::MakeBindGroup(device, bgls[1], {{0, buffer1}});
+ wgpu::BindGroup bg2 = utils::MakeBindGroup(device, bgls[2], {{0, buffer2}});
+
+ wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+ wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
+ pass.SetPipeline(computePipeline);
+ pass.SetBindGroup(0, bg0);
+ pass.SetBindGroup(1, bg1);
+ pass.SetBindGroup(2, bg2);
+ pass.DispatchWorkgroups(1);
+ pass.End();
+ wgpu::CommandBuffer commands = encoder.Finish();
+ queue.Submit(1, &commands);
+
+ uint32_t expectedValue = nonEmptyGroupIndex + 1;
+ EXPECT_BUFFER_U32_EQ(expectedValue, buffer2, 0);
+ }
+}
+
DAWN_INSTANTIATE_TEST(PipelineLayoutTests,
D3D11Backend(),
D3D12Backend(),