blob: 14bf5267146679043412eadad67c658477049fd3 [file] [log] [blame]
Bryan Bernhart0363c3e2020-02-27 01:14:22 +00001// Copyright 2020 The Dawn Authors
2//
3// Licensed under the Apache License, Version 2.0 (the "License");
4// you may not use this file except in compliance with the License.
5// You may obtain a copy of the License at
6//
7// http://www.apache.org/licenses/LICENSE-2.0
8//
9// Unless required by applicable law or agreed to in writing, software
10// distributed under the License is distributed on an "AS IS" BASIS,
11// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12// See the License for the specific language governing permissions and
13// limitations under the License.
14
15#include "tests/DawnTest.h"
16
Brandon Jonesb6f4d532020-11-13 02:11:12 +000017#include "dawn_native/Device.h"
Bryan Bernhart04638992020-03-25 18:31:08 +000018#include "dawn_native/Toggles.h"
Bryan Bernhartcb859a22020-04-06 22:07:42 +000019#include "dawn_native/d3d12/BindGroupLayoutD3D12.h"
Bryan Bernhart0363c3e2020-02-27 01:14:22 +000020#include "dawn_native/d3d12/DeviceD3D12.h"
21#include "dawn_native/d3d12/ShaderVisibleDescriptorAllocatorD3D12.h"
Bryan Bernhart4f865052020-04-10 18:43:22 +000022#include "dawn_native/d3d12/StagingDescriptorAllocatorD3D12.h"
Bryan Bernhart0363c3e2020-02-27 01:14:22 +000023#include "utils/ComboRenderPipelineDescriptor.h"
24#include "utils/WGPUHelpers.h"
25
// Edge length (in texels) of the render target used by every test below.
constexpr uint32_t kRTSize = 4;

// Pooling tests are required to advance the GPU completed serial to reuse heaps.
// This requires Tick() to be called at-least |kFrameDepth| times. This constant
// should be updated if the internals of Tick() change.
constexpr uint32_t kFrameDepth = 2;

using namespace dawn_native::d3d12;
35class D3D12DescriptorHeapTests : public DawnTest {
Bryan Bernhart52d06272020-03-09 22:56:59 +000036 protected:
Austin Eng40dc5d32020-05-15 22:06:35 +000037 void SetUp() override {
38 DawnTest::SetUp();
Jiawei Shao44fc6e32021-05-26 01:04:32 +000039 DAWN_TEST_UNSUPPORTED_IF(UsesWire());
Bryan Bernhart52d06272020-03-09 22:56:59 +000040 mD3DDevice = reinterpret_cast<Device*>(device.Get());
Bryan Bernhart04638992020-03-25 18:31:08 +000041
Corentin Wallez7aec4ae2021-03-24 15:55:32 +000042 mSimpleVSModule = utils::CreateShaderModule(device, R"(
Bryan Bernhart04638992020-03-25 18:31:08 +000043
Corentin Wallez78d27e82021-04-13 10:42:44 +000044 [[stage(vertex)]] fn main(
45 [[builtin(vertex_index)]] VertexIndex : u32
46 ) -> [[builtin(position)]] vec4<f32> {
Corentin Wallezb86e45f2021-06-17 21:36:11 +000047 var pos = array<vec2<f32>, 3>(
Corentin Wallez36f19da2021-03-16 10:00:44 +000048 vec2<f32>(-1.0, 1.0),
49 vec2<f32>( 1.0, 1.0),
50 vec2<f32>(-1.0, -1.0)
51 );
Corentin Wallez78d27e82021-04-13 10:42:44 +000052 return vec4<f32>(pos[VertexIndex], 0.0, 1.0);
Corentin Wallez36f19da2021-03-16 10:00:44 +000053 })");
54
Corentin Wallez7aec4ae2021-03-24 15:55:32 +000055 mSimpleFSModule = utils::CreateShaderModule(device, R"(
James Priced4f8c392021-12-15 13:13:26 +000056 struct U {
Corentin Wallez36f19da2021-03-16 10:00:44 +000057 color : vec4<f32>;
58 };
59 [[group(0), binding(0)]] var<uniform> colorBuffer : U;
Corentin Wallez36f19da2021-03-16 10:00:44 +000060
Corentin Wallez78d27e82021-04-13 10:42:44 +000061 [[stage(fragment)]] fn main() -> [[location(0)]] vec4<f32> {
62 return colorBuffer.color;
Corentin Wallez36f19da2021-03-16 10:00:44 +000063 })");
Bryan Bernhart04638992020-03-25 18:31:08 +000064 }
65
Corentin Wallez7119a022020-04-08 16:04:32 +000066 utils::BasicRenderPass MakeRenderPass(uint32_t width,
Bryan Bernhart04638992020-03-25 18:31:08 +000067 uint32_t height,
68 wgpu::TextureFormat format) {
69 DAWN_ASSERT(width > 0 && height > 0);
70
71 wgpu::TextureDescriptor descriptor;
72 descriptor.dimension = wgpu::TextureDimension::e2D;
73 descriptor.size.width = width;
74 descriptor.size.height = height;
shrekshaob00de7f2021-03-22 21:12:36 +000075 descriptor.size.depthOrArrayLayers = 1;
Bryan Bernhart04638992020-03-25 18:31:08 +000076 descriptor.sampleCount = 1;
77 descriptor.format = format;
78 descriptor.mipLevelCount = 1;
Corentin Wallez6b087812020-10-27 15:35:56 +000079 descriptor.usage = wgpu::TextureUsage::RenderAttachment | wgpu::TextureUsage::CopySrc;
Bryan Bernhart04638992020-03-25 18:31:08 +000080 wgpu::Texture color = device.CreateTexture(&descriptor);
81
82 return utils::BasicRenderPass(width, height, color);
83 }
84
Bryan Bernhart04638992020-03-25 18:31:08 +000085 std::array<float, 4> GetSolidColor(uint32_t n) const {
86 ASSERT(n >> 24 == 0);
87 float b = (n & 0xFF) / 255.0f;
88 float g = ((n >> 8) & 0xFF) / 255.0f;
89 float r = ((n >> 16) & 0xFF) / 255.0f;
90 return {r, g, b, 1};
Bryan Bernhart0363c3e2020-02-27 01:14:22 +000091 }
Bryan Bernhart52d06272020-03-09 22:56:59 +000092
93 Device* mD3DDevice = nullptr;
Bryan Bernhart04638992020-03-25 18:31:08 +000094
95 wgpu::ShaderModule mSimpleVSModule;
96 wgpu::ShaderModule mSimpleFSModule;
Bryan Bernhart0363c3e2020-02-27 01:14:22 +000097};
98
Bryan Bernhart4f865052020-04-10 18:43:22 +000099class DummyStagingDescriptorAllocator {
Bryan Bernhartcb859a22020-04-06 22:07:42 +0000100 public:
Bryan Bernhart4f865052020-04-10 18:43:22 +0000101 DummyStagingDescriptorAllocator(Device* device,
102 uint32_t descriptorCount,
103 uint32_t allocationsPerHeap)
Bryan Bernhartcb859a22020-04-06 22:07:42 +0000104 : mAllocator(device,
105 descriptorCount,
106 allocationsPerHeap * descriptorCount,
107 D3D12_DESCRIPTOR_HEAP_TYPE_SAMPLER) {
108 }
109
110 CPUDescriptorHeapAllocation AllocateCPUDescriptors() {
111 dawn_native::ResultOrError<CPUDescriptorHeapAllocation> result =
112 mAllocator.AllocateCPUDescriptors();
113 return (result.IsSuccess()) ? result.AcquireSuccess() : CPUDescriptorHeapAllocation{};
114 }
115
116 void Deallocate(CPUDescriptorHeapAllocation& allocation) {
117 mAllocator.Deallocate(&allocation);
118 }
119
120 private:
Bryan Bernhart4f865052020-04-10 18:43:22 +0000121 StagingDescriptorAllocator mAllocator;
Bryan Bernhartcb859a22020-04-06 22:07:42 +0000122};
123
Bryan Bernharte25ee252020-05-18 23:25:31 +0000124// Verify the shader visible view heaps switch over within a single submit.
125TEST_P(D3D12DescriptorHeapTests, SwitchOverViewHeap) {
Jiawei Shao44fc6e32021-05-26 01:04:32 +0000126 DAWN_TEST_UNSUPPORTED_IF(!mD3DDevice->IsToggleEnabled(
Bryan Bernharte25ee252020-05-18 23:25:31 +0000127 dawn_native::Toggle::UseD3D12SmallShaderVisibleHeapForTesting));
128
Brandon Jones41c87d92021-05-21 05:01:38 +0000129 utils::ComboRenderPipelineDescriptor renderPipelineDescriptor;
Bryan Bernharte25ee252020-05-18 23:25:31 +0000130
131 // Fill in a view heap with "view only" bindgroups (1x view per group) by creating a
132 // view bindgroup each draw. After HEAP_SIZE + 1 draws, the heaps must switch over.
Brandon Jonesbff9d3a2021-03-18 02:54:27 +0000133 renderPipelineDescriptor.vertex.module = mSimpleVSModule;
134 renderPipelineDescriptor.cFragment.module = mSimpleFSModule;
Bryan Bernharte25ee252020-05-18 23:25:31 +0000135
Brandon Jones41c87d92021-05-21 05:01:38 +0000136 wgpu::RenderPipeline renderPipeline = device.CreateRenderPipeline(&renderPipelineDescriptor);
Bryan Bernharte25ee252020-05-18 23:25:31 +0000137 utils::BasicRenderPass renderPass = utils::CreateBasicRenderPass(device, kRTSize, kRTSize);
138
139 Device* d3dDevice = reinterpret_cast<Device*>(device.Get());
140 ShaderVisibleDescriptorAllocator* allocator =
141 d3dDevice->GetViewShaderVisibleDescriptorAllocator();
142 const uint64_t heapSize = allocator->GetShaderVisibleHeapSizeForTesting();
143
Corentin Wallezcac14e02020-09-28 16:05:24 +0000144 const HeapVersionID heapSerial = allocator->GetShaderVisibleHeapSerialForTesting();
Bryan Bernharte25ee252020-05-18 23:25:31 +0000145
146 wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
147 {
148 wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass.renderPassInfo);
149
150 pass.SetPipeline(renderPipeline);
151
152 std::array<float, 4> redColor = {1, 0, 0, 1};
153 wgpu::Buffer uniformBuffer = utils::CreateBufferFromData(
154 device, &redColor, sizeof(redColor), wgpu::BufferUsage::Uniform);
155
156 for (uint32_t i = 0; i < heapSize + 1; ++i) {
157 pass.SetBindGroup(0, utils::MakeBindGroup(device, renderPipeline.GetBindGroupLayout(0),
158 {{0, uniformBuffer, 0, sizeof(redColor)}}));
159 pass.Draw(3);
160 }
161
162 pass.EndPass();
163 }
164
165 wgpu::CommandBuffer commands = encoder.Finish();
166 queue.Submit(1, &commands);
167
Corentin Wallezcac14e02020-09-28 16:05:24 +0000168 EXPECT_EQ(allocator->GetShaderVisibleHeapSerialForTesting(), heapSerial + HeapVersionID(1));
Bryan Bernharte25ee252020-05-18 23:25:31 +0000169}
170
171// Verify the shader visible sampler heaps does not switch over within a single submit.
172TEST_P(D3D12DescriptorHeapTests, NoSwitchOverSamplerHeap) {
Brandon Jones41c87d92021-05-21 05:01:38 +0000173 utils::ComboRenderPipelineDescriptor renderPipelineDescriptor;
Bryan Bernhart0363c3e2020-02-27 01:14:22 +0000174
175 // Fill in a sampler heap with "sampler only" bindgroups (1x sampler per group) by creating a
Bryan Bernharte25ee252020-05-18 23:25:31 +0000176 // sampler bindgroup each draw. After HEAP_SIZE + 1 draws, the heaps WILL NOT switch over
177 // because the sampler heap allocations are de-duplicated.
Corentin Wallez7aec4ae2021-03-24 15:55:32 +0000178 renderPipelineDescriptor.vertex.module = utils::CreateShaderModule(device, R"(
Corentin Wallez78d27e82021-04-13 10:42:44 +0000179 [[stage(vertex)]] fn main() -> [[builtin(position)]] vec4<f32> {
180 return vec4<f32>(0.0, 0.0, 0.0, 1.0);
Bryan Bernhart0363c3e2020-02-27 01:14:22 +0000181 })");
182
Corentin Wallez7aec4ae2021-03-24 15:55:32 +0000183 renderPipelineDescriptor.cFragment.module = utils::CreateShaderModule(device, R"(
Corentin Wallez36f19da2021-03-16 10:00:44 +0000184 [[group(0), binding(0)]] var sampler0 : sampler;
Corentin Wallez78d27e82021-04-13 10:42:44 +0000185 [[stage(fragment)]] fn main() -> [[location(0)]] vec4<f32> {
Ben Clayton5c4ce7b2021-10-25 15:14:23 +0000186 _ = sampler0;
Corentin Wallez78d27e82021-04-13 10:42:44 +0000187 return vec4<f32>(0.0, 0.0, 0.0, 0.0);
Bryan Bernhart0363c3e2020-02-27 01:14:22 +0000188 })");
189
Brandon Jones41c87d92021-05-21 05:01:38 +0000190 wgpu::RenderPipeline renderPipeline = device.CreateRenderPipeline(&renderPipelineDescriptor);
Bryan Bernhart0363c3e2020-02-27 01:14:22 +0000191 utils::BasicRenderPass renderPass = utils::CreateBasicRenderPass(device, kRTSize, kRTSize);
192
Corentin Wallezfb2e7712021-02-05 20:26:54 +0000193 wgpu::Sampler sampler = device.CreateSampler();
Bryan Bernhart0363c3e2020-02-27 01:14:22 +0000194
195 Device* d3dDevice = reinterpret_cast<Device*>(device.Get());
Bryan Bernhart303a3da2020-04-30 23:19:16 +0000196 ShaderVisibleDescriptorAllocator* allocator =
197 d3dDevice->GetSamplerShaderVisibleDescriptorAllocator();
198 const uint64_t samplerHeapSize = allocator->GetShaderVisibleHeapSizeForTesting();
Bryan Bernhart0363c3e2020-02-27 01:14:22 +0000199
Corentin Wallezcac14e02020-09-28 16:05:24 +0000200 const HeapVersionID HeapVersionID = allocator->GetShaderVisibleHeapSerialForTesting();
Bryan Bernhart0363c3e2020-02-27 01:14:22 +0000201
202 wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
203 {
204 wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass.renderPassInfo);
205
206 pass.SetPipeline(renderPipeline);
207
208 for (uint32_t i = 0; i < samplerHeapSize + 1; ++i) {
209 pass.SetBindGroup(0, utils::MakeBindGroup(device, renderPipeline.GetBindGroupLayout(0),
210 {{0, sampler}}));
Corentin Wallez67b1ad72020-03-31 16:21:35 +0000211 pass.Draw(3);
Bryan Bernhart0363c3e2020-02-27 01:14:22 +0000212 }
213
214 pass.EndPass();
215 }
216
217 wgpu::CommandBuffer commands = encoder.Finish();
218 queue.Submit(1, &commands);
219
Corentin Wallezcac14e02020-09-28 16:05:24 +0000220 EXPECT_EQ(allocator->GetShaderVisibleHeapSerialForTesting(), HeapVersionID);
Bryan Bernhart0363c3e2020-02-27 01:14:22 +0000221}
222
Bryan Bernhart52d06272020-03-09 22:56:59 +0000223// Verify shader-visible heaps can be recycled for multiple submits.
224TEST_P(D3D12DescriptorHeapTests, PoolHeapsInMultipleSubmits) {
Bryan Bernhartf03590a2020-07-30 21:50:32 +0000225 // Use small heaps to count only pool-allocated switches.
Jiawei Shao44fc6e32021-05-26 01:04:32 +0000226 DAWN_TEST_UNSUPPORTED_IF(!mD3DDevice->IsToggleEnabled(
Bryan Bernhartf03590a2020-07-30 21:50:32 +0000227 dawn_native::Toggle::UseD3D12SmallShaderVisibleHeapForTesting));
228
Bryan Bernhart303a3da2020-04-30 23:19:16 +0000229 ShaderVisibleDescriptorAllocator* allocator =
230 mD3DDevice->GetSamplerShaderVisibleDescriptorAllocator();
Bryan Bernhart52d06272020-03-09 22:56:59 +0000231
Bryan Bernhart303a3da2020-04-30 23:19:16 +0000232 std::list<ComPtr<ID3D12DescriptorHeap>> heaps = {allocator->GetShaderVisibleHeap()};
Bryan Bernhart52d06272020-03-09 22:56:59 +0000233
Bryan Bernhart303a3da2020-04-30 23:19:16 +0000234 EXPECT_EQ(allocator->GetShaderVisiblePoolSizeForTesting(), 0u);
Bryan Bernhart52d06272020-03-09 22:56:59 +0000235
Brandon Jonesb6f4d532020-11-13 02:11:12 +0000236 // Allocate + increment internal serials up to |kFrameDepth| and ensure heaps are always unique.
Bryan Bernhart52d06272020-03-09 22:56:59 +0000237 for (uint32_t i = 0; i < kFrameDepth; i++) {
Bryan Bernhart303a3da2020-04-30 23:19:16 +0000238 EXPECT_TRUE(allocator->AllocateAndSwitchShaderVisibleHeap().IsSuccess());
239 ComPtr<ID3D12DescriptorHeap> heap = allocator->GetShaderVisibleHeap();
Bryan Bernhart52d06272020-03-09 22:56:59 +0000240 EXPECT_TRUE(std::find(heaps.begin(), heaps.end(), heap) == heaps.end());
241 heaps.push_back(heap);
Brandon Jonesb6f4d532020-11-13 02:11:12 +0000242 // CheckPassedSerials() will update the last internally completed serial.
Corentin Wallez6870e6d2021-04-07 18:09:21 +0000243 EXPECT_TRUE(mD3DDevice->CheckPassedSerials().IsSuccess());
Brandon Jonesb6f4d532020-11-13 02:11:12 +0000244 // NextSerial() will increment the last internally submitted serial.
245 EXPECT_TRUE(mD3DDevice->NextSerial().IsSuccess());
Bryan Bernhart52d06272020-03-09 22:56:59 +0000246 }
247
248 // Repeat up to |kFrameDepth| again but ensure heaps are the same in the expected order
249 // (oldest heaps are recycled first). The "+ 1" is so we also include the very first heap in the
250 // check.
251 for (uint32_t i = 0; i < kFrameDepth + 1; i++) {
Bryan Bernhart303a3da2020-04-30 23:19:16 +0000252 EXPECT_TRUE(allocator->AllocateAndSwitchShaderVisibleHeap().IsSuccess());
253 ComPtr<ID3D12DescriptorHeap> heap = allocator->GetShaderVisibleHeap();
Bryan Bernhart52d06272020-03-09 22:56:59 +0000254 EXPECT_TRUE(heaps.front() == heap);
255 heaps.pop_front();
Corentin Wallez6870e6d2021-04-07 18:09:21 +0000256 EXPECT_TRUE(mD3DDevice->CheckPassedSerials().IsSuccess());
Brandon Jonesb6f4d532020-11-13 02:11:12 +0000257 EXPECT_TRUE(mD3DDevice->NextSerial().IsSuccess());
Bryan Bernhart52d06272020-03-09 22:56:59 +0000258 }
259
260 EXPECT_TRUE(heaps.empty());
Bryan Bernhart303a3da2020-04-30 23:19:16 +0000261 EXPECT_EQ(allocator->GetShaderVisiblePoolSizeForTesting(), kFrameDepth);
Bryan Bernhart52d06272020-03-09 22:56:59 +0000262}
263
264// Verify shader-visible heaps do not recycle in a pending submit.
265TEST_P(D3D12DescriptorHeapTests, PoolHeapsInPendingSubmit) {
Bryan Bernhartf03590a2020-07-30 21:50:32 +0000266 // Use small heaps to count only pool-allocated switches.
Jiawei Shao44fc6e32021-05-26 01:04:32 +0000267 DAWN_TEST_UNSUPPORTED_IF(!mD3DDevice->IsToggleEnabled(
Bryan Bernhartf03590a2020-07-30 21:50:32 +0000268 dawn_native::Toggle::UseD3D12SmallShaderVisibleHeapForTesting));
269
Bryan Bernhart52d06272020-03-09 22:56:59 +0000270 constexpr uint32_t kNumOfSwitches = 5;
271
Bryan Bernhart303a3da2020-04-30 23:19:16 +0000272 ShaderVisibleDescriptorAllocator* allocator =
273 mD3DDevice->GetSamplerShaderVisibleDescriptorAllocator();
Bryan Bernhart52d06272020-03-09 22:56:59 +0000274
Corentin Wallezcac14e02020-09-28 16:05:24 +0000275 const HeapVersionID heapSerial = allocator->GetShaderVisibleHeapSerialForTesting();
Bryan Bernhart52d06272020-03-09 22:56:59 +0000276
Bryan Bernhart303a3da2020-04-30 23:19:16 +0000277 std::set<ComPtr<ID3D12DescriptorHeap>> heaps = {allocator->GetShaderVisibleHeap()};
Bryan Bernhart52d06272020-03-09 22:56:59 +0000278
Bryan Bernhart303a3da2020-04-30 23:19:16 +0000279 EXPECT_EQ(allocator->GetShaderVisiblePoolSizeForTesting(), 0u);
Bryan Bernhart52d06272020-03-09 22:56:59 +0000280
281 // Switch-over |kNumOfSwitches| and ensure heaps are always unique.
282 for (uint32_t i = 0; i < kNumOfSwitches; i++) {
Bryan Bernhart303a3da2020-04-30 23:19:16 +0000283 EXPECT_TRUE(allocator->AllocateAndSwitchShaderVisibleHeap().IsSuccess());
284 ComPtr<ID3D12DescriptorHeap> heap = allocator->GetShaderVisibleHeap();
Bryan Bernhart52d06272020-03-09 22:56:59 +0000285 EXPECT_TRUE(std::find(heaps.begin(), heaps.end(), heap) == heaps.end());
286 heaps.insert(heap);
287 }
288
289 // After |kNumOfSwitches|, no heaps are recycled.
Corentin Wallezcac14e02020-09-28 16:05:24 +0000290 EXPECT_EQ(allocator->GetShaderVisibleHeapSerialForTesting(),
291 heapSerial + HeapVersionID(kNumOfSwitches));
Bryan Bernhart303a3da2020-04-30 23:19:16 +0000292 EXPECT_EQ(allocator->GetShaderVisiblePoolSizeForTesting(), kNumOfSwitches);
Bryan Bernhart52d06272020-03-09 22:56:59 +0000293}
294
295// Verify switching shader-visible heaps do not recycle in a pending submit but do so
296// once no longer pending.
297TEST_P(D3D12DescriptorHeapTests, PoolHeapsInPendingAndMultipleSubmits) {
Bryan Bernhartf03590a2020-07-30 21:50:32 +0000298 // Use small heaps to count only pool-allocated switches.
Jiawei Shao44fc6e32021-05-26 01:04:32 +0000299 DAWN_TEST_UNSUPPORTED_IF(!mD3DDevice->IsToggleEnabled(
Bryan Bernhartf03590a2020-07-30 21:50:32 +0000300 dawn_native::Toggle::UseD3D12SmallShaderVisibleHeapForTesting));
301
Bryan Bernhart52d06272020-03-09 22:56:59 +0000302 constexpr uint32_t kNumOfSwitches = 5;
303
Bryan Bernhart303a3da2020-04-30 23:19:16 +0000304 ShaderVisibleDescriptorAllocator* allocator =
305 mD3DDevice->GetSamplerShaderVisibleDescriptorAllocator();
Corentin Wallezcac14e02020-09-28 16:05:24 +0000306 const HeapVersionID heapSerial = allocator->GetShaderVisibleHeapSerialForTesting();
Bryan Bernhart52d06272020-03-09 22:56:59 +0000307
Bryan Bernhart303a3da2020-04-30 23:19:16 +0000308 std::set<ComPtr<ID3D12DescriptorHeap>> heaps = {allocator->GetShaderVisibleHeap()};
Bryan Bernhart52d06272020-03-09 22:56:59 +0000309
Bryan Bernhart303a3da2020-04-30 23:19:16 +0000310 EXPECT_EQ(allocator->GetShaderVisiblePoolSizeForTesting(), 0u);
Bryan Bernhart52d06272020-03-09 22:56:59 +0000311
312 // Switch-over |kNumOfSwitches| to create a pool of unique heaps.
313 for (uint32_t i = 0; i < kNumOfSwitches; i++) {
Bryan Bernhart303a3da2020-04-30 23:19:16 +0000314 EXPECT_TRUE(allocator->AllocateAndSwitchShaderVisibleHeap().IsSuccess());
315 ComPtr<ID3D12DescriptorHeap> heap = allocator->GetShaderVisibleHeap();
Bryan Bernhart52d06272020-03-09 22:56:59 +0000316 EXPECT_TRUE(std::find(heaps.begin(), heaps.end(), heap) == heaps.end());
317 heaps.insert(heap);
318 }
319
Corentin Wallezcac14e02020-09-28 16:05:24 +0000320 EXPECT_EQ(allocator->GetShaderVisibleHeapSerialForTesting(),
321 heapSerial + HeapVersionID(kNumOfSwitches));
Bryan Bernhart303a3da2020-04-30 23:19:16 +0000322 EXPECT_EQ(allocator->GetShaderVisiblePoolSizeForTesting(), kNumOfSwitches);
Bryan Bernhart52d06272020-03-09 22:56:59 +0000323
324 // Ensure switched-over heaps can be recycled by advancing the GPU by at-least |kFrameDepth|.
325 for (uint32_t i = 0; i < kFrameDepth; i++) {
Corentin Wallez2ce4b902021-03-29 14:02:05 +0000326 mD3DDevice->APITick();
Bryan Bernhart52d06272020-03-09 22:56:59 +0000327 }
328
329 // Switch-over |kNumOfSwitches| again reusing the same heaps.
330 for (uint32_t i = 0; i < kNumOfSwitches; i++) {
Bryan Bernhart303a3da2020-04-30 23:19:16 +0000331 EXPECT_TRUE(allocator->AllocateAndSwitchShaderVisibleHeap().IsSuccess());
332 ComPtr<ID3D12DescriptorHeap> heap = allocator->GetShaderVisibleHeap();
Bryan Bernhart52d06272020-03-09 22:56:59 +0000333 EXPECT_TRUE(std::find(heaps.begin(), heaps.end(), heap) != heaps.end());
334 heaps.erase(heap);
335 }
336
337 // After switching-over |kNumOfSwitches| x 2, ensure no additional heaps exist.
Corentin Wallezcac14e02020-09-28 16:05:24 +0000338 EXPECT_EQ(allocator->GetShaderVisibleHeapSerialForTesting(),
339 heapSerial + HeapVersionID(kNumOfSwitches * 2));
Bryan Bernhart303a3da2020-04-30 23:19:16 +0000340 EXPECT_EQ(allocator->GetShaderVisiblePoolSizeForTesting(), kNumOfSwitches);
Bryan Bernhart52d06272020-03-09 22:56:59 +0000341}
342
Bryan Bernhartf03590a2020-07-30 21:50:32 +0000343// Verify shader-visible heaps do not recycle in multiple submits.
344TEST_P(D3D12DescriptorHeapTests, GrowHeapsInMultipleSubmits) {
345 ShaderVisibleDescriptorAllocator* allocator =
346 mD3DDevice->GetSamplerShaderVisibleDescriptorAllocator();
347
Corentin Wallezcac14e02020-09-28 16:05:24 +0000348 const HeapVersionID heapSerial = allocator->GetShaderVisibleHeapSerialForTesting();
Bryan Bernhartf03590a2020-07-30 21:50:32 +0000349
350 std::set<ComPtr<ID3D12DescriptorHeap>> heaps = {allocator->GetShaderVisibleHeap()};
351
352 EXPECT_EQ(allocator->GetShaderVisiblePoolSizeForTesting(), 0u);
353
354 // Growth: Allocate + Tick() and ensure heaps are always unique.
355 while (allocator->GetShaderVisiblePoolSizeForTesting() == 0) {
356 EXPECT_TRUE(allocator->AllocateAndSwitchShaderVisibleHeap().IsSuccess());
357 ComPtr<ID3D12DescriptorHeap> heap = allocator->GetShaderVisibleHeap();
358 EXPECT_TRUE(std::find(heaps.begin(), heaps.end(), heap) == heaps.end());
359 heaps.insert(heap);
Corentin Wallez2ce4b902021-03-29 14:02:05 +0000360 mD3DDevice->APITick();
Bryan Bernhartf03590a2020-07-30 21:50:32 +0000361 }
362
363 // Verify the number of switches equals the size of heaps allocated (minus the initial).
364 EXPECT_EQ(allocator->GetShaderVisiblePoolSizeForTesting(), 1u);
Corentin Wallezcac14e02020-09-28 16:05:24 +0000365 EXPECT_EQ(allocator->GetShaderVisibleHeapSerialForTesting(),
366 heapSerial + HeapVersionID(heaps.size() - 1));
Bryan Bernhartf03590a2020-07-30 21:50:32 +0000367}
368
369// Verify shader-visible heaps do not recycle in a pending submit.
370TEST_P(D3D12DescriptorHeapTests, GrowHeapsInPendingSubmit) {
371 ShaderVisibleDescriptorAllocator* allocator =
372 mD3DDevice->GetSamplerShaderVisibleDescriptorAllocator();
373
Corentin Wallezcac14e02020-09-28 16:05:24 +0000374 const HeapVersionID heapSerial = allocator->GetShaderVisibleHeapSerialForTesting();
Bryan Bernhartf03590a2020-07-30 21:50:32 +0000375
376 std::set<ComPtr<ID3D12DescriptorHeap>> heaps = {allocator->GetShaderVisibleHeap()};
377
378 EXPECT_EQ(allocator->GetShaderVisiblePoolSizeForTesting(), 0u);
379
380 // Growth: Allocate new heaps.
381 while (allocator->GetShaderVisiblePoolSizeForTesting() == 0) {
382 EXPECT_TRUE(allocator->AllocateAndSwitchShaderVisibleHeap().IsSuccess());
383 ComPtr<ID3D12DescriptorHeap> heap = allocator->GetShaderVisibleHeap();
384 EXPECT_TRUE(std::find(heaps.begin(), heaps.end(), heap) == heaps.end());
385 heaps.insert(heap);
386 }
387
388 // Verify the number of switches equals the size of heaps allocated (minus the initial).
389 EXPECT_EQ(allocator->GetShaderVisiblePoolSizeForTesting(), 1u);
Corentin Wallezcac14e02020-09-28 16:05:24 +0000390 EXPECT_EQ(allocator->GetShaderVisibleHeapSerialForTesting(),
391 heapSerial + HeapVersionID(heaps.size() - 1));
Bryan Bernhartf03590a2020-07-30 21:50:32 +0000392}
393
394// Verify switching shader-visible heaps do not recycle in a pending submit but do so
395// once no longer pending.
396// Switches over many times until |kNumOfPooledHeaps| heaps are pool-allocated.
397TEST_P(D3D12DescriptorHeapTests, GrowAndPoolHeapsInPendingAndMultipleSubmits) {
398 ShaderVisibleDescriptorAllocator* allocator =
399 mD3DDevice->GetSamplerShaderVisibleDescriptorAllocator();
400
401 std::set<ComPtr<ID3D12DescriptorHeap>> heaps = {allocator->GetShaderVisibleHeap()};
402
403 EXPECT_EQ(allocator->GetShaderVisiblePoolSizeForTesting(), 0u);
404
405 uint32_t kNumOfPooledHeaps = 5;
406 while (allocator->GetShaderVisiblePoolSizeForTesting() < kNumOfPooledHeaps) {
407 EXPECT_TRUE(allocator->AllocateAndSwitchShaderVisibleHeap().IsSuccess());
408 ComPtr<ID3D12DescriptorHeap> heap = allocator->GetShaderVisibleHeap();
409 EXPECT_TRUE(std::find(heaps.begin(), heaps.end(), heap) == heaps.end());
410 heaps.insert(heap);
411 }
412
413 EXPECT_EQ(allocator->GetShaderVisiblePoolSizeForTesting(), kNumOfPooledHeaps);
414
415 // Ensure switched-over heaps can be recycled by advancing the GPU by at-least |kFrameDepth|.
416 for (uint32_t i = 0; i < kFrameDepth; i++) {
Corentin Wallez2ce4b902021-03-29 14:02:05 +0000417 mD3DDevice->APITick();
Bryan Bernhartf03590a2020-07-30 21:50:32 +0000418 }
419
420 // Switch-over the pool-allocated heaps.
421 for (uint32_t i = 0; i < kNumOfPooledHeaps; i++) {
422 EXPECT_TRUE(allocator->AllocateAndSwitchShaderVisibleHeap().IsSuccess());
423 ComPtr<ID3D12DescriptorHeap> heap = allocator->GetShaderVisibleHeap();
424 EXPECT_FALSE(std::find(heaps.begin(), heaps.end(), heap) == heaps.end());
425 }
426
427 EXPECT_EQ(allocator->GetShaderVisiblePoolSizeForTesting(), kNumOfPooledHeaps);
428}
429
Bryan Bernhart04638992020-03-25 18:31:08 +0000430// Verify encoding multiple heaps worth of bindgroups.
431// Shader-visible heaps will switch out |kNumOfHeaps| times.
432TEST_P(D3D12DescriptorHeapTests, EncodeManyUBO) {
433 // This test draws a solid color triangle |heapSize| times. Each draw uses a new bindgroup that
434 // has its own UBO with a "color value" in the range [1... heapSize]. After |heapSize| draws,
435 // the result is the arithmetic sum of the sequence after the framebuffer is blended by
436 // accumulation. By checking for this sum, we ensure each bindgroup was encoded correctly.
Jiawei Shao44fc6e32021-05-26 01:04:32 +0000437 DAWN_TEST_UNSUPPORTED_IF(!mD3DDevice->IsToggleEnabled(
Bryan Bernhart04638992020-03-25 18:31:08 +0000438 dawn_native::Toggle::UseD3D12SmallShaderVisibleHeapForTesting));
439
440 utils::BasicRenderPass renderPass =
shrekshaobdc029e2021-07-19 23:27:27 +0000441 MakeRenderPass(kRTSize, kRTSize, wgpu::TextureFormat::R16Float);
Bryan Bernhart04638992020-03-25 18:31:08 +0000442
Brandon Jones41c87d92021-05-21 05:01:38 +0000443 utils::ComboRenderPipelineDescriptor pipelineDescriptor;
Brandon Jonesbff9d3a2021-03-18 02:54:27 +0000444 pipelineDescriptor.vertex.module = mSimpleVSModule;
Bryan Bernhart04638992020-03-25 18:31:08 +0000445
Corentin Wallez7aec4ae2021-03-24 15:55:32 +0000446 pipelineDescriptor.cFragment.module = utils::CreateShaderModule(device, R"(
James Priced4f8c392021-12-15 13:13:26 +0000447 struct U {
Corentin Wallez36f19da2021-03-16 10:00:44 +0000448 heapSize : f32;
Bryan Bernhart04638992020-03-25 18:31:08 +0000449 };
Corentin Wallez36f19da2021-03-16 10:00:44 +0000450 [[group(0), binding(0)]] var<uniform> buffer0 : U;
Corentin Wallez36f19da2021-03-16 10:00:44 +0000451
shrekshao4f2edf52021-08-11 21:12:36 +0000452 [[stage(fragment)]] fn main() -> [[location(0)]] vec4<f32> {
453 return vec4<f32>(buffer0.heapSize, 0.0, 0.0, 1.0);
Bryan Bernhart04638992020-03-25 18:31:08 +0000454 })");
455
Brandon Jonesbff9d3a2021-03-18 02:54:27 +0000456 wgpu::BlendState blend;
457 blend.color.operation = wgpu::BlendOperation::Add;
458 blend.color.srcFactor = wgpu::BlendFactor::One;
459 blend.color.dstFactor = wgpu::BlendFactor::One;
460 blend.alpha.operation = wgpu::BlendOperation::Add;
461 blend.alpha.srcFactor = wgpu::BlendFactor::One;
462 blend.alpha.dstFactor = wgpu::BlendFactor::One;
Bryan Bernhart04638992020-03-25 18:31:08 +0000463
shrekshaobdc029e2021-07-19 23:27:27 +0000464 pipelineDescriptor.cTargets[0].format = wgpu::TextureFormat::R16Float;
Brandon Jonesbff9d3a2021-03-18 02:54:27 +0000465 pipelineDescriptor.cTargets[0].blend = &blend;
466
Brandon Jones41c87d92021-05-21 05:01:38 +0000467 wgpu::RenderPipeline renderPipeline = device.CreateRenderPipeline(&pipelineDescriptor);
Bryan Bernhart04638992020-03-25 18:31:08 +0000468
Bryan Bernhart303a3da2020-04-30 23:19:16 +0000469 const uint32_t heapSize =
470 mD3DDevice->GetViewShaderVisibleDescriptorAllocator()->GetShaderVisibleHeapSizeForTesting();
Bryan Bernhart04638992020-03-25 18:31:08 +0000471
472 constexpr uint32_t kNumOfHeaps = 2;
473
474 const uint32_t numOfEncodedBindGroups = kNumOfHeaps * heapSize;
475
476 std::vector<wgpu::BindGroup> bindGroups;
477 for (uint32_t i = 0; i < numOfEncodedBindGroups; i++) {
478 const float color = i + 1;
479 wgpu::Buffer uniformBuffer =
480 utils::CreateBufferFromData(device, &color, sizeof(color), wgpu::BufferUsage::Uniform);
481 bindGroups.push_back(utils::MakeBindGroup(device, renderPipeline.GetBindGroupLayout(0),
482 {{0, uniformBuffer}}));
483 }
484
485 wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
486 {
487 wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass.renderPassInfo);
488
489 pass.SetPipeline(renderPipeline);
490
491 for (uint32_t i = 0; i < numOfEncodedBindGroups; ++i) {
492 pass.SetBindGroup(0, bindGroups[i]);
Corentin Wallez67b1ad72020-03-31 16:21:35 +0000493 pass.Draw(3);
Bryan Bernhart04638992020-03-25 18:31:08 +0000494 }
495
496 pass.EndPass();
497 }
498
499 wgpu::CommandBuffer commands = encoder.Finish();
500 queue.Submit(1, &commands);
501
502 float colorSum = numOfEncodedBindGroups * (numOfEncodedBindGroups + 1) / 2;
shrekshaobdc029e2021-07-19 23:27:27 +0000503 EXPECT_PIXEL_FLOAT16_EQ(colorSum, renderPass.color, 0, 0);
Bryan Bernhart04638992020-03-25 18:31:08 +0000504}
505
506// Verify encoding one bindgroup then a heaps worth in different submits.
507// Shader-visible heaps should switch out once upon encoding 1 + |heapSize| descriptors.
508// The first descriptor's memory will be reused when the second submit encodes |heapSize|
509// descriptors.
510TEST_P(D3D12DescriptorHeapTests, EncodeUBOOverflowMultipleSubmit) {
Jiawei Shao44fc6e32021-05-26 01:04:32 +0000511 DAWN_TEST_UNSUPPORTED_IF(!mD3DDevice->IsToggleEnabled(
Bryan Bernhart04638992020-03-25 18:31:08 +0000512 dawn_native::Toggle::UseD3D12SmallShaderVisibleHeapForTesting));
513
Ben Clayton9ef74c52021-03-30 15:22:17 +0000514 // TODO(crbug.com/dawn/742): Test output is wrong with D3D12 + WARP.
Jiawei Shao44fc6e32021-05-26 01:04:32 +0000515 DAWN_SUPPRESS_TEST_IF(IsD3D12() && IsWARP());
Ben Clayton9ef74c52021-03-30 15:22:17 +0000516
Bryan Bernhart04638992020-03-25 18:31:08 +0000517 utils::BasicRenderPass renderPass = utils::CreateBasicRenderPass(device, kRTSize, kRTSize);
518
Brandon Jones41c87d92021-05-21 05:01:38 +0000519 utils::ComboRenderPipelineDescriptor pipelineDescriptor;
Brandon Jonesbff9d3a2021-03-18 02:54:27 +0000520 pipelineDescriptor.vertex.module = mSimpleVSModule;
521 pipelineDescriptor.cFragment.module = mSimpleFSModule;
522 pipelineDescriptor.cTargets[0].format = renderPass.colorFormat;
Bryan Bernhart04638992020-03-25 18:31:08 +0000523
Brandon Jones41c87d92021-05-21 05:01:38 +0000524 wgpu::RenderPipeline renderPipeline = device.CreateRenderPipeline(&pipelineDescriptor);
Bryan Bernhart04638992020-03-25 18:31:08 +0000525
526 // Encode the first descriptor and submit.
527 {
528 std::array<float, 4> greenColor = {0, 1, 0, 1};
529 wgpu::Buffer uniformBuffer = utils::CreateBufferFromData(
530 device, &greenColor, sizeof(greenColor), wgpu::BufferUsage::Uniform);
531
532 wgpu::BindGroup bindGroup = utils::MakeBindGroup(
533 device, renderPipeline.GetBindGroupLayout(0), {{0, uniformBuffer}});
534
535 wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
536 {
537 wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass.renderPassInfo);
538
539 pass.SetPipeline(renderPipeline);
540 pass.SetBindGroup(0, bindGroup);
Corentin Wallez67b1ad72020-03-31 16:21:35 +0000541 pass.Draw(3);
Bryan Bernhart04638992020-03-25 18:31:08 +0000542 pass.EndPass();
543 }
544
545 wgpu::CommandBuffer commands = encoder.Finish();
546 queue.Submit(1, &commands);
547 }
548
549 EXPECT_PIXEL_RGBA8_EQ(RGBA8::kGreen, renderPass.color, 0, 0);
550
551 // Encode a heap worth of descriptors.
552 {
Bryan Bernhart303a3da2020-04-30 23:19:16 +0000553 const uint32_t heapSize = mD3DDevice->GetSamplerShaderVisibleDescriptorAllocator()
554 ->GetShaderVisibleHeapSizeForTesting();
Bryan Bernhart04638992020-03-25 18:31:08 +0000555
556 std::vector<wgpu::BindGroup> bindGroups;
557 for (uint32_t i = 0; i < heapSize - 1; i++) {
558 std::array<float, 4> fillColor = GetSolidColor(i + 1); // Avoid black
559 wgpu::Buffer uniformBuffer = utils::CreateBufferFromData(
560 device, &fillColor, sizeof(fillColor), wgpu::BufferUsage::Uniform);
561
562 bindGroups.push_back(utils::MakeBindGroup(device, renderPipeline.GetBindGroupLayout(0),
563 {{0, uniformBuffer}}));
564 }
565
566 std::array<float, 4> redColor = {1, 0, 0, 1};
567 wgpu::Buffer lastUniformBuffer = utils::CreateBufferFromData(
568 device, &redColor, sizeof(redColor), wgpu::BufferUsage::Uniform);
569
570 bindGroups.push_back(utils::MakeBindGroup(device, renderPipeline.GetBindGroupLayout(0),
571 {{0, lastUniformBuffer, 0, sizeof(redColor)}}));
572
573 wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
574 {
575 wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass.renderPassInfo);
576
577 pass.SetPipeline(renderPipeline);
578
579 for (uint32_t i = 0; i < heapSize; ++i) {
580 pass.SetBindGroup(0, bindGroups[i]);
Corentin Wallez67b1ad72020-03-31 16:21:35 +0000581 pass.Draw(3);
Bryan Bernhart04638992020-03-25 18:31:08 +0000582 }
583
584 pass.EndPass();
585 }
586
587 wgpu::CommandBuffer commands = encoder.Finish();
588 queue.Submit(1, &commands);
589 }
590
591 EXPECT_PIXEL_RGBA8_EQ(RGBA8::kRed, renderPass.color, 0, 0);
592}
593
594// Verify encoding a heaps worth of bindgroups plus one more then reuse the first
595// bindgroup in the same submit.
596// Shader-visible heaps should switch out once then re-encode the first descriptor at a new offset
597// in the heap.
598TEST_P(D3D12DescriptorHeapTests, EncodeReuseUBOOverflow) {
Jiawei Shao44fc6e32021-05-26 01:04:32 +0000599 DAWN_TEST_UNSUPPORTED_IF(!mD3DDevice->IsToggleEnabled(
Bryan Bernhart04638992020-03-25 18:31:08 +0000600 dawn_native::Toggle::UseD3D12SmallShaderVisibleHeapForTesting));
601
602 utils::BasicRenderPass renderPass = utils::CreateBasicRenderPass(device, kRTSize, kRTSize);
603
Brandon Jones41c87d92021-05-21 05:01:38 +0000604 utils::ComboRenderPipelineDescriptor pipelineDescriptor;
Brandon Jonesbff9d3a2021-03-18 02:54:27 +0000605 pipelineDescriptor.vertex.module = mSimpleVSModule;
606 pipelineDescriptor.cFragment.module = mSimpleFSModule;
607 pipelineDescriptor.cTargets[0].format = renderPass.colorFormat;
Bryan Bernhart04638992020-03-25 18:31:08 +0000608
Brandon Jones41c87d92021-05-21 05:01:38 +0000609 wgpu::RenderPipeline pipeline = device.CreateRenderPipeline(&pipelineDescriptor);
Bryan Bernhart04638992020-03-25 18:31:08 +0000610
611 std::array<float, 4> redColor = {1, 0, 0, 1};
612 wgpu::Buffer firstUniformBuffer = utils::CreateBufferFromData(
613 device, &redColor, sizeof(redColor), wgpu::BufferUsage::Uniform);
614
615 std::vector<wgpu::BindGroup> bindGroups = {utils::MakeBindGroup(
616 device, pipeline.GetBindGroupLayout(0), {{0, firstUniformBuffer, 0, sizeof(redColor)}})};
617
Bryan Bernhart303a3da2020-04-30 23:19:16 +0000618 const uint32_t heapSize =
619 mD3DDevice->GetViewShaderVisibleDescriptorAllocator()->GetShaderVisibleHeapSizeForTesting();
Bryan Bernhart04638992020-03-25 18:31:08 +0000620
621 for (uint32_t i = 0; i < heapSize; i++) {
622 const std::array<float, 4>& fillColor = GetSolidColor(i + 1); // Avoid black
623 wgpu::Buffer uniformBuffer = utils::CreateBufferFromData(
624 device, &fillColor, sizeof(fillColor), wgpu::BufferUsage::Uniform);
625 bindGroups.push_back(utils::MakeBindGroup(device, pipeline.GetBindGroupLayout(0),
626 {{0, uniformBuffer, 0, sizeof(fillColor)}}));
627 }
628
629 wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
630 {
631 wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass.renderPassInfo);
632
633 pass.SetPipeline(pipeline);
634
635 // Encode a heap worth of descriptors plus one more.
636 for (uint32_t i = 0; i < heapSize + 1; ++i) {
637 pass.SetBindGroup(0, bindGroups[i]);
Corentin Wallez67b1ad72020-03-31 16:21:35 +0000638 pass.Draw(3);
Bryan Bernhart04638992020-03-25 18:31:08 +0000639 }
640
641 // Re-encode the first bindgroup again.
642 pass.SetBindGroup(0, bindGroups[0]);
Corentin Wallez67b1ad72020-03-31 16:21:35 +0000643 pass.Draw(3);
Bryan Bernhart04638992020-03-25 18:31:08 +0000644
645 pass.EndPass();
646 }
647
648 wgpu::CommandBuffer commands = encoder.Finish();
649 queue.Submit(1, &commands);
650
651 // Make sure the first bindgroup was encoded correctly.
652 EXPECT_PIXEL_RGBA8_EQ(RGBA8::kRed, renderPass.color, 0, 0);
653}
654
655// Verify encoding a heaps worth of bindgroups plus one more in the first submit then reuse the
656// first bindgroup again in the second submit.
657// Shader-visible heaps should switch out once then re-encode the
658// first descriptor at the same offset in the heap.
659TEST_P(D3D12DescriptorHeapTests, EncodeReuseUBOMultipleSubmits) {
Jiawei Shao44fc6e32021-05-26 01:04:32 +0000660 DAWN_TEST_UNSUPPORTED_IF(!mD3DDevice->IsToggleEnabled(
Bryan Bernhart04638992020-03-25 18:31:08 +0000661 dawn_native::Toggle::UseD3D12SmallShaderVisibleHeapForTesting));
662
663 utils::BasicRenderPass renderPass = utils::CreateBasicRenderPass(device, kRTSize, kRTSize);
664
Brandon Jones41c87d92021-05-21 05:01:38 +0000665 utils::ComboRenderPipelineDescriptor pipelineDescriptor;
Brandon Jonesbff9d3a2021-03-18 02:54:27 +0000666 pipelineDescriptor.vertex.module = mSimpleVSModule;
667 pipelineDescriptor.cFragment.module = mSimpleFSModule;
668 pipelineDescriptor.cTargets[0].format = renderPass.colorFormat;
Bryan Bernhart04638992020-03-25 18:31:08 +0000669
Brandon Jones41c87d92021-05-21 05:01:38 +0000670 wgpu::RenderPipeline pipeline = device.CreateRenderPipeline(&pipelineDescriptor);
Bryan Bernhart04638992020-03-25 18:31:08 +0000671
672 // Encode heap worth of descriptors plus one more.
673 std::array<float, 4> redColor = {1, 0, 0, 1};
674
675 wgpu::Buffer firstUniformBuffer = utils::CreateBufferFromData(
676 device, &redColor, sizeof(redColor), wgpu::BufferUsage::Uniform);
677
678 std::vector<wgpu::BindGroup> bindGroups = {utils::MakeBindGroup(
679 device, pipeline.GetBindGroupLayout(0), {{0, firstUniformBuffer, 0, sizeof(redColor)}})};
680
Bryan Bernhart303a3da2020-04-30 23:19:16 +0000681 const uint32_t heapSize =
682 mD3DDevice->GetViewShaderVisibleDescriptorAllocator()->GetShaderVisibleHeapSizeForTesting();
Bryan Bernhart04638992020-03-25 18:31:08 +0000683
684 for (uint32_t i = 0; i < heapSize; i++) {
685 std::array<float, 4> fillColor = GetSolidColor(i + 1); // Avoid black
686 wgpu::Buffer uniformBuffer = utils::CreateBufferFromData(
687 device, &fillColor, sizeof(fillColor), wgpu::BufferUsage::Uniform);
688
689 bindGroups.push_back(utils::MakeBindGroup(device, pipeline.GetBindGroupLayout(0),
690 {{0, uniformBuffer, 0, sizeof(fillColor)}}));
691 }
692
693 {
694 wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
695 {
696 wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass.renderPassInfo);
697
698 pass.SetPipeline(pipeline);
699
700 for (uint32_t i = 0; i < heapSize + 1; ++i) {
701 pass.SetBindGroup(0, bindGroups[i]);
Corentin Wallez67b1ad72020-03-31 16:21:35 +0000702 pass.Draw(3);
Bryan Bernhart04638992020-03-25 18:31:08 +0000703 }
704
705 pass.EndPass();
706 }
707
708 wgpu::CommandBuffer commands = encoder.Finish();
709 queue.Submit(1, &commands);
710 }
711
712 // Re-encode the first bindgroup again.
713 {
714 std::array<float, 4> greenColor = {0, 1, 0, 1};
Corentin Wallez47a33412020-06-02 09:24:39 +0000715 queue.WriteBuffer(firstUniformBuffer, 0, &greenColor, sizeof(greenColor));
Bryan Bernhart04638992020-03-25 18:31:08 +0000716
717 wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
718 {
719 wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass.renderPassInfo);
720
721 pass.SetPipeline(pipeline);
722
723 pass.SetBindGroup(0, bindGroups[0]);
Corentin Wallez67b1ad72020-03-31 16:21:35 +0000724 pass.Draw(3);
Bryan Bernhart04638992020-03-25 18:31:08 +0000725
726 pass.EndPass();
727 }
728
729 wgpu::CommandBuffer commands = encoder.Finish();
730 queue.Submit(1, &commands);
731 }
732
733 // Make sure the first bindgroup was re-encoded correctly.
734 EXPECT_PIXEL_RGBA8_EQ(RGBA8::kGreen, renderPass.color, 0, 0);
735}
736
// Verify encoding many sampler and ubo worth of bindgroups.
// Shader-visible heaps should switch out |kNumOfViewHeaps| times.
TEST_P(D3D12DescriptorHeapTests, EncodeManyUBOAndSamplers) {
    DAWN_TEST_UNSUPPORTED_IF(!mD3DDevice->IsToggleEnabled(
        dawn_native::Toggle::UseD3D12SmallShaderVisibleHeapForTesting));

    // Create a solid filled texture.
    wgpu::TextureDescriptor descriptor;
    descriptor.dimension = wgpu::TextureDimension::e2D;
    descriptor.size.width = kRTSize;
    descriptor.size.height = kRTSize;
    descriptor.size.depthOrArrayLayers = 1;
    descriptor.sampleCount = 1;
    descriptor.format = wgpu::TextureFormat::RGBA8Unorm;
    descriptor.mipLevelCount = 1;
    descriptor.usage = wgpu::TextureUsage::TextureBinding | wgpu::TextureUsage::RenderAttachment |
                       wgpu::TextureUsage::CopySrc;
    wgpu::Texture texture = device.CreateTexture(&descriptor);
    wgpu::TextureView textureView = texture.CreateView();

    {
        // Clear the texture to green so that sampling it later returns a known color.
        utils::BasicRenderPass renderPass = utils::BasicRenderPass(kRTSize, kRTSize, texture);

        utils::ComboRenderPassDescriptor renderPassDesc({textureView});
        renderPassDesc.cColorAttachments[0].loadOp = wgpu::LoadOp::Clear;
        renderPassDesc.cColorAttachments[0].clearColor = {0.0f, 1.0f, 0.0f, 1.0f};
        renderPass.renderPassInfo.cColorAttachments[0].view = textureView;

        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
        auto pass = encoder.BeginRenderPass(&renderPassDesc);
        pass.EndPass();

        wgpu::CommandBuffer commandBuffer = encoder.Finish();
        queue.Submit(1, &commandBuffer);

        // Sanity-check that the clear actually produced green.
        RGBA8 filled(0, 255, 0, 255);
        EXPECT_PIXEL_RGBA8_EQ(filled, renderPass.color, 0, 0);
    }

    {
        utils::ComboRenderPipelineDescriptor pipelineDescriptor;

        // Vertex stage uses one UBO (binding 0: the 2x2 transform).
        pipelineDescriptor.vertex.module = utils::CreateShaderModule(device, R"(
            struct U {
                transform : mat2x2<f32>;
            };
            [[group(0), binding(0)]] var<uniform> buffer0 : U;

            [[stage(vertex)]] fn main(
                [[builtin(vertex_index)]] VertexIndex : u32
            ) -> [[builtin(position)]] vec4<f32> {
                var pos = array<vec2<f32>, 3>(
                    vec2<f32>(-1.0, 1.0),
                    vec2<f32>( 1.0, 1.0),
                    vec2<f32>(-1.0, -1.0)
                );
                return vec4<f32>(buffer0.transform * (pos[VertexIndex]), 0.0, 1.0);
            })");
        // Fragment stage uses a sampler (binding 1), a sampled texture (binding 2), and a
        // second UBO (binding 3) whose color is added to the sampled value.
        pipelineDescriptor.cFragment.module = utils::CreateShaderModule(device, R"(
            struct U {
                color : vec4<f32>;
            };
            [[group(0), binding(1)]] var sampler0 : sampler;
            [[group(0), binding(2)]] var texture0 : texture_2d<f32>;
            [[group(0), binding(3)]] var<uniform> buffer0 : U;

            [[stage(fragment)]] fn main(
                [[builtin(position)]] FragCoord : vec4<f32>
            ) -> [[location(0)]] vec4<f32> {
                return textureSample(texture0, sampler0, FragCoord.xy) + buffer0.color;
            })");

        utils::BasicRenderPass renderPass = utils::CreateBasicRenderPass(device, kRTSize, kRTSize);
        pipelineDescriptor.cTargets[0].format = renderPass.colorFormat;

        wgpu::RenderPipeline pipeline = device.CreateRenderPipeline(&pipelineDescriptor);

        // Encode a heap worth of descriptors |kNumOfHeaps| times.
        constexpr float transform[] = {1.f, 0.f, 0.f, 1.f};
        wgpu::Buffer transformBuffer = utils::CreateBufferFromData(
            device, &transform, sizeof(transform), wgpu::BufferUsage::Uniform);

        wgpu::SamplerDescriptor samplerDescriptor;
        wgpu::Sampler sampler = device.CreateSampler(&samplerDescriptor);

        ShaderVisibleDescriptorAllocator* viewAllocator =
            mD3DDevice->GetViewShaderVisibleDescriptorAllocator();

        ShaderVisibleDescriptorAllocator* samplerAllocator =
            mD3DDevice->GetSamplerShaderVisibleDescriptorAllocator();

        // Record the starting serials so heap switch-outs can be counted afterwards.
        const HeapVersionID viewHeapSerial = viewAllocator->GetShaderVisibleHeapSerialForTesting();
        const HeapVersionID samplerHeapSerial =
            samplerAllocator->GetShaderVisibleHeapSerialForTesting();

        const uint32_t viewHeapSize = viewAllocator->GetShaderVisibleHeapSizeForTesting();

        // "Small" view heap is always 2 x sampler heap size and encodes 3x the descriptors per
        // group. This means the count of heaps switches is determined by the total number of views
        // to encode. Compute the number of bindgroups to encode by counting the required views for
        // |kNumOfViewHeaps| heaps worth.
        // Each bindgroup consumes 3 views (2 CBVs + 1 SRV) and 1 sampler descriptor.
        constexpr uint32_t kViewsPerBindGroup = 3;
        constexpr uint32_t kNumOfViewHeaps = 5;

        const uint32_t numOfEncodedBindGroups =
            (viewHeapSize * kNumOfViewHeaps) / kViewsPerBindGroup;

        std::vector<wgpu::BindGroup> bindGroups;
        for (uint32_t i = 0; i < numOfEncodedBindGroups - 1; i++) {
            std::array<float, 4> fillColor = GetSolidColor(i + 1);  // Avoid black
            wgpu::Buffer uniformBuffer = utils::CreateBufferFromData(
                device, &fillColor, sizeof(fillColor), wgpu::BufferUsage::Uniform);

            bindGroups.push_back(utils::MakeBindGroup(device, pipeline.GetBindGroupLayout(0),
                                                      {{0, transformBuffer, 0, sizeof(transform)},
                                                       {1, sampler},
                                                       {2, textureView},
                                                       {3, uniformBuffer, 0, sizeof(fillColor)}}));
        }

        // The last bindgroup is red; combined with the green texture it yields yellow.
        std::array<float, 4> redColor = {1, 0, 0, 1};
        wgpu::Buffer lastUniformBuffer = utils::CreateBufferFromData(
            device, &redColor, sizeof(redColor), wgpu::BufferUsage::Uniform);

        bindGroups.push_back(utils::MakeBindGroup(device, pipeline.GetBindGroupLayout(0),
                                                  {{0, transformBuffer, 0, sizeof(transform)},
                                                   {1, sampler},
                                                   {2, textureView},
                                                   {3, lastUniformBuffer, 0, sizeof(redColor)}}));

        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass.renderPassInfo);

        pass.SetPipeline(pipeline);

        for (uint32_t i = 0; i < numOfEncodedBindGroups; ++i) {
            pass.SetBindGroup(0, bindGroups[i]);
            pass.Draw(3);
        }

        pass.EndPass();

        wgpu::CommandBuffer commands = encoder.Finish();
        queue.Submit(1, &commands);

        // Final accumulated color is result of sampled + UBO color.
        RGBA8 filled(255, 255, 0, 255);
        RGBA8 notFilled(0, 0, 0, 0);
        EXPECT_PIXEL_RGBA8_EQ(filled, renderPass.color, 0, 0);
        EXPECT_PIXEL_RGBA8_EQ(notFilled, renderPass.color, kRTSize - 1, 0);

        // The view heap must have switched out |kNumOfViewHeaps| times (pool grew by that
        // many heaps and the serial advanced by the same amount)...
        EXPECT_EQ(viewAllocator->GetShaderVisiblePoolSizeForTesting(), kNumOfViewHeaps);
        EXPECT_EQ(viewAllocator->GetShaderVisibleHeapSerialForTesting(),
                  viewHeapSerial + HeapVersionID(kNumOfViewHeaps));

        // ...while the sampler heap never switched out: empty pool, unchanged serial.
        EXPECT_EQ(samplerAllocator->GetShaderVisiblePoolSizeForTesting(), 0u);
        EXPECT_EQ(samplerAllocator->GetShaderVisibleHeapSerialForTesting(), samplerHeapSerial);
    }
}
896
Bryan Bernhartcb859a22020-04-06 22:07:42 +0000897// Verify a single allocate/deallocate.
898// One non-shader visible heap will be created.
899TEST_P(D3D12DescriptorHeapTests, Single) {
900 constexpr uint32_t kDescriptorCount = 4;
901 constexpr uint32_t kAllocationsPerHeap = 3;
Bryan Bernhart4f865052020-04-10 18:43:22 +0000902 DummyStagingDescriptorAllocator allocator(mD3DDevice, kDescriptorCount, kAllocationsPerHeap);
Bryan Bernhartcb859a22020-04-06 22:07:42 +0000903
904 CPUDescriptorHeapAllocation allocation = allocator.AllocateCPUDescriptors();
905 EXPECT_EQ(allocation.GetHeapIndex(), 0u);
906 EXPECT_NE(allocation.OffsetFrom(0, 0).ptr, 0u);
907
908 allocator.Deallocate(allocation);
909 EXPECT_FALSE(allocation.IsValid());
910}
911
912// Verify allocating many times causes the pool to increase in size.
913// Creates |kNumOfHeaps| non-shader visible heaps.
914TEST_P(D3D12DescriptorHeapTests, Sequential) {
915 constexpr uint32_t kDescriptorCount = 4;
916 constexpr uint32_t kAllocationsPerHeap = 3;
Bryan Bernhart4f865052020-04-10 18:43:22 +0000917 DummyStagingDescriptorAllocator allocator(mD3DDevice, kDescriptorCount, kAllocationsPerHeap);
Bryan Bernhartcb859a22020-04-06 22:07:42 +0000918
919 // Allocate |kNumOfHeaps| worth.
920 constexpr uint32_t kNumOfHeaps = 2;
921
922 std::set<uint32_t> allocatedHeaps;
923
924 std::vector<CPUDescriptorHeapAllocation> allocations;
925 for (uint32_t i = 0; i < kAllocationsPerHeap * kNumOfHeaps; i++) {
926 CPUDescriptorHeapAllocation allocation = allocator.AllocateCPUDescriptors();
927 EXPECT_EQ(allocation.GetHeapIndex(), i / kAllocationsPerHeap);
928 EXPECT_NE(allocation.OffsetFrom(0, 0).ptr, 0u);
929 allocations.push_back(allocation);
930 allocatedHeaps.insert(allocation.GetHeapIndex());
931 }
932
933 EXPECT_EQ(allocatedHeaps.size(), kNumOfHeaps);
934
935 // Deallocate all.
936 for (CPUDescriptorHeapAllocation& allocation : allocations) {
937 allocator.Deallocate(allocation);
938 EXPECT_FALSE(allocation.IsValid());
939 }
940}
941
942// Verify that re-allocating a number of allocations < pool size, all heaps are reused.
943// Creates and reuses |kNumofHeaps| non-shader visible heaps.
944TEST_P(D3D12DescriptorHeapTests, ReuseFreedHeaps) {
945 constexpr uint32_t kDescriptorCount = 4;
946 constexpr uint32_t kAllocationsPerHeap = 25;
Bryan Bernhart4f865052020-04-10 18:43:22 +0000947 DummyStagingDescriptorAllocator allocator(mD3DDevice, kDescriptorCount, kAllocationsPerHeap);
Bryan Bernhartcb859a22020-04-06 22:07:42 +0000948
949 constexpr uint32_t kNumofHeaps = 10;
950
951 std::list<CPUDescriptorHeapAllocation> allocations;
952 std::set<size_t> allocationPtrs;
953
954 // Allocate |kNumofHeaps| heaps worth.
955 for (uint32_t i = 0; i < kAllocationsPerHeap * kNumofHeaps; i++) {
956 CPUDescriptorHeapAllocation allocation = allocator.AllocateCPUDescriptors();
957 allocations.push_back(allocation);
958 EXPECT_TRUE(allocationPtrs.insert(allocation.OffsetFrom(0, 0).ptr).second);
959 }
960
961 // Deallocate all.
962 for (CPUDescriptorHeapAllocation& allocation : allocations) {
963 allocator.Deallocate(allocation);
964 EXPECT_FALSE(allocation.IsValid());
965 }
966
967 allocations.clear();
968
969 // Re-allocate all again.
970 std::set<size_t> reallocatedPtrs;
971 for (uint32_t i = 0; i < kAllocationsPerHeap * kNumofHeaps; i++) {
972 CPUDescriptorHeapAllocation allocation = allocator.AllocateCPUDescriptors();
973 allocations.push_back(allocation);
974 EXPECT_TRUE(reallocatedPtrs.insert(allocation.OffsetFrom(0, 0).ptr).second);
975 EXPECT_TRUE(std::find(allocationPtrs.begin(), allocationPtrs.end(),
976 allocation.OffsetFrom(0, 0).ptr) != allocationPtrs.end());
977 }
978
979 // Deallocate all again.
980 for (CPUDescriptorHeapAllocation& allocation : allocations) {
981 allocator.Deallocate(allocation);
982 EXPECT_FALSE(allocation.IsValid());
983 }
984}
985
986// Verify allocating then deallocating many times.
987TEST_P(D3D12DescriptorHeapTests, AllocateDeallocateMany) {
988 constexpr uint32_t kDescriptorCount = 4;
989 constexpr uint32_t kAllocationsPerHeap = 25;
Bryan Bernhart4f865052020-04-10 18:43:22 +0000990 DummyStagingDescriptorAllocator allocator(mD3DDevice, kDescriptorCount, kAllocationsPerHeap);
Bryan Bernhartcb859a22020-04-06 22:07:42 +0000991
992 std::list<CPUDescriptorHeapAllocation> list3;
993 std::list<CPUDescriptorHeapAllocation> list5;
994 std::list<CPUDescriptorHeapAllocation> allocations;
995
996 constexpr uint32_t kNumofHeaps = 2;
997
998 // Allocate |kNumofHeaps| heaps worth.
999 for (uint32_t i = 0; i < kAllocationsPerHeap * kNumofHeaps; i++) {
1000 CPUDescriptorHeapAllocation allocation = allocator.AllocateCPUDescriptors();
1001 EXPECT_NE(allocation.OffsetFrom(0, 0).ptr, 0u);
1002 if (i % 3 == 0) {
1003 list3.push_back(allocation);
1004 } else {
1005 allocations.push_back(allocation);
1006 }
1007 }
1008
1009 // Deallocate every 3rd allocation.
1010 for (auto it = list3.begin(); it != list3.end(); it = list3.erase(it)) {
1011 allocator.Deallocate(*it);
1012 }
1013
1014 // Allocate again.
1015 for (uint32_t i = 0; i < kAllocationsPerHeap * kNumofHeaps; i++) {
1016 CPUDescriptorHeapAllocation allocation = allocator.AllocateCPUDescriptors();
1017 EXPECT_NE(allocation.OffsetFrom(0, 0).ptr, 0u);
1018 if (i % 5 == 0) {
1019 list5.push_back(allocation);
1020 } else {
1021 allocations.push_back(allocation);
1022 }
1023 }
1024
1025 // Deallocate every 5th allocation.
1026 for (auto it = list5.begin(); it != list5.end(); it = list5.erase(it)) {
1027 allocator.Deallocate(*it);
1028 }
1029
1030 // Allocate again.
1031 for (uint32_t i = 0; i < kAllocationsPerHeap * kNumofHeaps; i++) {
1032 CPUDescriptorHeapAllocation allocation = allocator.AllocateCPUDescriptors();
1033 EXPECT_NE(allocation.OffsetFrom(0, 0).ptr, 0u);
1034 allocations.push_back(allocation);
1035 }
1036
1037 // Deallocate remaining.
1038 for (CPUDescriptorHeapAllocation& allocation : allocations) {
1039 allocator.Deallocate(allocation);
1040 EXPECT_FALSE(allocation.IsValid());
1041 }
1042}
1043
Bryan Bernhart04638992020-03-25 18:31:08 +00001044DAWN_INSTANTIATE_TEST(D3D12DescriptorHeapTests,
1045 D3D12Backend(),
1046 D3D12Backend({"use_d3d12_small_shader_visible_heap"}));