Implement GPUBufferDescriptor.mappedAtCreation.
This CL:
- Adds mappedAtCreation to dawn.json
- Changes dawn_native to implement CreateBufferMapped in terms of
mappedAtCreation.
- Duplicates all the CreateBufferMappedTests to mappedAtCreation tests
(both validation and end2end).
- Implements dawn_wire's mappedAtCreation in terms of
CreateBufferMapped. The reversal in dawn_wire will be done in a
follow-up CL.
Bug: dawn:445
Change-Id: I70b9fa729b1402524a6b993c3f288987eb65c6c4
Reviewed-on: https://dawn-review.googlesource.com/c/dawn/+/24083
Commit-Queue: Corentin Wallez <cwallez@chromium.org>
Reviewed-by: Kai Ninomiya <kainino@chromium.org>
diff --git a/dawn.json b/dawn.json
index 1d900cb..d7e1902 100644
--- a/dawn.json
+++ b/dawn.json
@@ -229,7 +229,8 @@
"members": [
{"name": "label", "type": "char", "annotation": "const*", "length": "strlen", "optional": true},
{"name": "usage", "type": "buffer usage"},
- {"name": "size", "type": "uint64_t"}
+ {"name": "size", "type": "uint64_t"},
+ {"name": "mapped at creation", "type": "bool", "default": "false"}
]
},
"buffer map read callback": {
diff --git a/src/dawn_native/Buffer.cpp b/src/dawn_native/Buffer.cpp
index cc1b6e7..de297a8 100644
--- a/src/dawn_native/Buffer.cpp
+++ b/src/dawn_native/Buffer.cpp
@@ -32,20 +32,21 @@
class ErrorBuffer final : public BufferBase {
public:
- ErrorBuffer(DeviceBase* device) : BufferBase(device, ObjectBase::kError) {
- }
+ ErrorBuffer(DeviceBase* device, const BufferDescriptor* descriptor)
+ : BufferBase(device, descriptor, ObjectBase::kError) {
+ if (descriptor->mappedAtCreation) {
+ // Check that the size can be used to allocate an mFakeMappedData. A malloc(0)
+ // is invalid, and on 32bit systems we should avoid a narrowing conversion that
+ // would make size = 1 << 32 + 1 allocate one byte.
+ bool isValidSize =
+ descriptor->size != 0 &&
+ descriptor->size < uint64_t(std::numeric_limits<size_t>::max());
- static ErrorBuffer* MakeMapped(DeviceBase* device,
- uint64_t size,
- uint8_t** mappedPointer) {
- ASSERT(mappedPointer != nullptr);
-
- ErrorBuffer* buffer = new ErrorBuffer(device);
- buffer->mFakeMappedData =
- std::unique_ptr<uint8_t[]>(new (std::nothrow) uint8_t[size]);
- *mappedPointer = buffer->mFakeMappedData.get();
-
- return buffer;
+ if (isValidSize) {
+ mFakeMappedData = std::unique_ptr<uint8_t[]>(new (std::nothrow)
+ uint8_t[descriptor->size]);
+ }
+ }
}
void ClearMappedData() {
@@ -58,7 +59,7 @@
return false;
}
- MaybeError MapAtCreationImpl(uint8_t** mappedPointer) override {
+ MaybeError MapAtCreationImpl() override {
UNREACHABLE();
return {};
}
@@ -107,6 +108,10 @@
return DAWN_VALIDATION_ERROR("Only CopyDst is allowed with MapRead");
}
+ if (descriptor->mappedAtCreation && descriptor->size % 4 != 0) {
+ return DAWN_VALIDATION_ERROR("size must be aligned to 4 when mappedAtCreation is true");
+ }
+
return {};
}
@@ -125,8 +130,13 @@
}
}
- BufferBase::BufferBase(DeviceBase* device, ObjectBase::ErrorTag tag)
- : ObjectBase(device, tag), mState(BufferState::Unmapped) {
+ BufferBase::BufferBase(DeviceBase* device,
+ const BufferDescriptor* descriptor,
+ ObjectBase::ErrorTag tag)
+ : ObjectBase(device, tag), mSize(descriptor->size), mState(BufferState::Unmapped) {
+ if (descriptor->mappedAtCreation) {
+ mState = BufferState::MappedAtCreation;
+ }
}
BufferBase::~BufferBase() {
@@ -138,15 +148,8 @@
}
// static
- BufferBase* BufferBase::MakeError(DeviceBase* device) {
- return new ErrorBuffer(device);
- }
-
- // static
- BufferBase* BufferBase::MakeErrorMapped(DeviceBase* device,
- uint64_t size,
- uint8_t** mappedPointer) {
- return ErrorBuffer::MakeMapped(device, size, mappedPointer);
+ BufferBase* BufferBase::MakeError(DeviceBase* device, const BufferDescriptor* descriptor) {
+ return new ErrorBuffer(device, descriptor);
}
uint64_t BufferBase::GetSize() const {
@@ -159,23 +162,19 @@
return mUsage;
}
- MaybeError BufferBase::MapAtCreation(uint8_t** mappedPointer) {
+ MaybeError BufferBase::MapAtCreation() {
ASSERT(!IsError());
- ASSERT(mappedPointer != nullptr);
-
mState = BufferState::MappedAtCreation;
// 0-sized buffers are not supposed to be written to. Return back any non-null pointer.
// Handle 0-sized buffers first so we don't try to map them in the backend.
if (mSize == 0) {
- *mappedPointer = reinterpret_cast<uint8_t*>(intptr_t(0xCAFED00D));
return {};
}
// Mappable buffers don't use a staging buffer and are just as if mapped through MapAsync.
if (IsMapWritable()) {
- DAWN_TRY(MapAtCreationImpl(mappedPointer));
- ASSERT(*mappedPointer != nullptr);
+ DAWN_TRY(MapAtCreationImpl());
return {};
}
@@ -185,9 +184,6 @@
// many small buffers.
DAWN_TRY_ASSIGN(mStagingBuffer, GetDevice()->CreateStagingBuffer(GetSize()));
- ASSERT(mStagingBuffer->GetMappedPointer() != nullptr);
- *mappedPointer = reinterpret_cast<uint8_t*>(mStagingBuffer->GetMappedPointer());
-
return {};
}
@@ -338,7 +334,8 @@
if (IsError()) {
// It is an error to call Destroy() on an ErrorBuffer, but we still need to reclaim the
// fake mapped staging data.
- reinterpret_cast<ErrorBuffer*>(this)->ClearMappedData();
+ static_cast<ErrorBuffer*>(this)->ClearMappedData();
+ mState = BufferState::Destroyed;
}
if (GetDevice()->ConsumedError(ValidateDestroy())) {
return;
@@ -377,7 +374,8 @@
if (IsError()) {
// It is an error to call Unmap() on an ErrorBuffer, but we still need to reclaim the
// fake mapped staging data.
- reinterpret_cast<ErrorBuffer*>(this)->ClearMappedData();
+ static_cast<ErrorBuffer*>(this)->ClearMappedData();
+ mState = BufferState::Unmapped;
}
if (GetDevice()->ConsumedError(ValidateUnmap())) {
return;
diff --git a/src/dawn_native/Buffer.h b/src/dawn_native/Buffer.h
index 3dfaa2a..b24a47e 100644
--- a/src/dawn_native/Buffer.h
+++ b/src/dawn_native/Buffer.h
@@ -42,15 +42,12 @@
public:
BufferBase(DeviceBase* device, const BufferDescriptor* descriptor);
- static BufferBase* MakeError(DeviceBase* device);
- static BufferBase* MakeErrorMapped(DeviceBase* device,
- uint64_t size,
- uint8_t** mappedPointer);
+ static BufferBase* MakeError(DeviceBase* device, const BufferDescriptor* descriptor);
uint64_t GetSize() const;
wgpu::BufferUsage GetUsage() const;
- MaybeError MapAtCreation(uint8_t** mappedPointer);
+ MaybeError MapAtCreation();
void OnMapCommandSerialFinished(uint32_t mapSerial, bool isWrite);
MaybeError ValidateCanUseOnQueueNow() const;
@@ -69,7 +66,9 @@
void Destroy();
protected:
- BufferBase(DeviceBase* device, ObjectBase::ErrorTag tag);
+ BufferBase(DeviceBase* device,
+ const BufferDescriptor* descriptor,
+ ObjectBase::ErrorTag tag);
~BufferBase() override;
void DestroyInternal();
@@ -77,7 +76,7 @@
bool IsMapped() const;
private:
- virtual MaybeError MapAtCreationImpl(uint8_t** mappedPointer) = 0;
+ virtual MaybeError MapAtCreationImpl() = 0;
virtual MaybeError MapReadAsyncImpl(uint32_t serial) = 0;
virtual MaybeError MapWriteAsyncImpl(uint32_t serial) = 0;
virtual void UnmapImpl() = 0;
diff --git a/src/dawn_native/Device.cpp b/src/dawn_native/Device.cpp
index 79c441e..e871962 100644
--- a/src/dawn_native/Device.cpp
+++ b/src/dawn_native/Device.cpp
@@ -607,40 +607,29 @@
BufferBase* result = nullptr;
if (ConsumedError(CreateBufferInternal(descriptor), &result)) {
ASSERT(result == nullptr);
- return BufferBase::MakeError(this);
+ return BufferBase::MakeError(this, descriptor);
}
return result;
}
WGPUCreateBufferMappedResult DeviceBase::CreateBufferMapped(
const BufferDescriptor* descriptor) {
- BufferBase* buffer = nullptr;
- uint8_t* data = nullptr;
+ EmitDeprecationWarning(
+ "CreateBufferMapped is deprecated, use wgpu::BufferDescriptor::mappedAtCreation and "
+ "wgpu::Buffer::GetMappedRange instead");
- uint64_t size = descriptor->size;
- if (ConsumedError(CreateBufferInternal(descriptor), &buffer) ||
- ConsumedError(buffer->MapAtCreation(&data))) {
- // Map failed. Replace the buffer with an error buffer.
- if (buffer != nullptr) {
- buffer->Release();
- }
- buffer = BufferBase::MakeErrorMapped(this, size, &data);
- }
-
- ASSERT(buffer != nullptr);
- if (data == nullptr) {
- // |data| may be nullptr if there was an OOM in MakeErrorMapped.
- // Non-zero dataLength and nullptr data is used to indicate there should be
- // mapped data but the allocation failed.
- ASSERT(buffer->IsError());
- } else {
- memset(data, 0, size);
- }
+ BufferDescriptor fixedDesc = *descriptor;
+ fixedDesc.mappedAtCreation = true;
+ BufferBase* buffer = CreateBuffer(&fixedDesc);
WGPUCreateBufferMappedResult result = {};
result.buffer = reinterpret_cast<WGPUBuffer>(buffer);
- result.data = data;
- result.dataLength = size;
+ result.data = buffer->GetMappedRange();
+ result.dataLength = descriptor->size;
+
+ if (result.data != nullptr) {
+ memset(result.data, 0, result.dataLength);
+ }
return result;
}
@@ -753,7 +742,8 @@
// For Dawn Wire
BufferBase* DeviceBase::CreateErrorBuffer() {
- return BufferBase::MakeError(this);
+ BufferDescriptor desc = {};
+ return BufferBase::MakeError(this, &desc);
}
// Other Device API methods
@@ -886,7 +876,15 @@
if (IsValidationEnabled()) {
DAWN_TRY(ValidateBufferDescriptor(this, descriptor));
}
- return CreateBufferImpl(descriptor);
+
+ BufferBase* buffer = nullptr;
+ DAWN_TRY_ASSIGN(buffer, CreateBufferImpl(descriptor));
+
+ if (descriptor->mappedAtCreation) {
+ DAWN_TRY(buffer->MapAtCreation());
+ }
+
+ return buffer;
}
MaybeError DeviceBase::CreateComputePipelineInternal(
@@ -1112,4 +1110,4 @@
}
}
-} // namespace dawn_native
\ No newline at end of file
+} // namespace dawn_native
diff --git a/src/dawn_native/d3d12/BufferD3D12.cpp b/src/dawn_native/d3d12/BufferD3D12.cpp
index a7b8fd8..8e8a7d7 100644
--- a/src/dawn_native/d3d12/BufferD3D12.cpp
+++ b/src/dawn_native/d3d12/BufferD3D12.cpp
@@ -264,9 +264,8 @@
return {};
}
- MaybeError Buffer::MapAtCreationImpl(uint8_t** mappedPointer) {
+ MaybeError Buffer::MapAtCreationImpl() {
DAWN_TRY(MapInternal(true, "D3D12 map at creation"));
- *mappedPointer = static_cast<uint8_t*>(mMappedData);
return {};
}
diff --git a/src/dawn_native/d3d12/BufferD3D12.h b/src/dawn_native/d3d12/BufferD3D12.h
index a364250..3422ce2 100644
--- a/src/dawn_native/d3d12/BufferD3D12.h
+++ b/src/dawn_native/d3d12/BufferD3D12.h
@@ -55,7 +55,7 @@
void DestroyImpl() override;
bool IsMapWritable() const override;
- virtual MaybeError MapAtCreationImpl(uint8_t** mappedPointer) override;
+ virtual MaybeError MapAtCreationImpl() override;
void* GetMappedPointerImpl() override;
MaybeError MapInternal(bool isWrite, const char* contextInfo);
diff --git a/src/dawn_native/metal/BufferMTL.h b/src/dawn_native/metal/BufferMTL.h
index f204e53..fdb2f8f 100644
--- a/src/dawn_native/metal/BufferMTL.h
+++ b/src/dawn_native/metal/BufferMTL.h
@@ -44,7 +44,7 @@
void* GetMappedPointerImpl() override;
bool IsMapWritable() const override;
- MaybeError MapAtCreationImpl(uint8_t** mappedPointer) override;
+ MaybeError MapAtCreationImpl() override;
void ClearBuffer(CommandRecordingContext* commandContext, uint8_t clearValue);
diff --git a/src/dawn_native/metal/BufferMTL.mm b/src/dawn_native/metal/BufferMTL.mm
index 858eab6..34e50891 100644
--- a/src/dawn_native/metal/BufferMTL.mm
+++ b/src/dawn_native/metal/BufferMTL.mm
@@ -109,8 +109,7 @@
return (GetUsage() & (wgpu::BufferUsage::MapRead | wgpu::BufferUsage::MapWrite)) != 0;
}
- MaybeError Buffer::MapAtCreationImpl(uint8_t** mappedPointer) {
- *mappedPointer = reinterpret_cast<uint8_t*>([mMtlBuffer contents]);
+ MaybeError Buffer::MapAtCreationImpl() {
return {};
}
diff --git a/src/dawn_native/null/DeviceNull.cpp b/src/dawn_native/null/DeviceNull.cpp
index b654347..3f5207f 100644
--- a/src/dawn_native/null/DeviceNull.cpp
+++ b/src/dawn_native/null/DeviceNull.cpp
@@ -297,8 +297,7 @@
return (GetUsage() & (wgpu::BufferUsage::MapRead | wgpu::BufferUsage::MapWrite)) != 0;
}
- MaybeError Buffer::MapAtCreationImpl(uint8_t** mappedPointer) {
- *mappedPointer = mBackingData.get();
+ MaybeError Buffer::MapAtCreationImpl() {
return {};
}
diff --git a/src/dawn_native/null/DeviceNull.h b/src/dawn_native/null/DeviceNull.h
index 56cb6a8..a9b724d 100644
--- a/src/dawn_native/null/DeviceNull.h
+++ b/src/dawn_native/null/DeviceNull.h
@@ -204,7 +204,7 @@
void DestroyImpl() override;
bool IsMapWritable() const override;
- MaybeError MapAtCreationImpl(uint8_t** mappedPointer) override;
+ MaybeError MapAtCreationImpl() override;
void MapAsyncImplCommon(uint32_t serial, bool isWrite);
void* GetMappedPointerImpl() override;
diff --git a/src/dawn_native/opengl/BufferGL.cpp b/src/dawn_native/opengl/BufferGL.cpp
index f9839bd..40a8d75 100644
--- a/src/dawn_native/opengl/BufferGL.cpp
+++ b/src/dawn_native/opengl/BufferGL.cpp
@@ -72,12 +72,10 @@
return true;
}
- MaybeError Buffer::MapAtCreationImpl(uint8_t** mappedPointer) {
+ MaybeError Buffer::MapAtCreationImpl() {
const OpenGLFunctions& gl = ToBackend(GetDevice())->gl;
-
gl.BindBuffer(GL_ARRAY_BUFFER, mBuffer);
mMappedData = gl.MapBuffer(GL_ARRAY_BUFFER, GL_WRITE_ONLY);
- *mappedPointer = reinterpret_cast<uint8_t*>(mMappedData);
return {};
}
diff --git a/src/dawn_native/opengl/BufferGL.h b/src/dawn_native/opengl/BufferGL.h
index 272af03..ee6f12e 100644
--- a/src/dawn_native/opengl/BufferGL.h
+++ b/src/dawn_native/opengl/BufferGL.h
@@ -40,7 +40,7 @@
void DestroyImpl() override;
bool IsMapWritable() const override;
- MaybeError MapAtCreationImpl(uint8_t** mappedPointer) override;
+ MaybeError MapAtCreationImpl() override;
void* GetMappedPointerImpl() override;
uint64_t GetAppliedSize() const;
diff --git a/src/dawn_native/opengl/TextureGL.cpp b/src/dawn_native/opengl/TextureGL.cpp
index ee64bcd..48b8fc4 100644
--- a/src/dawn_native/opengl/TextureGL.cpp
+++ b/src/dawn_native/opengl/TextureGL.cpp
@@ -303,24 +303,19 @@
ASSERT(bytesPerRow % GetFormat().blockByteSize == 0);
ASSERT(GetHeight() % GetFormat().blockHeight == 0);
- dawn_native::BufferDescriptor descriptor;
+ dawn_native::BufferDescriptor descriptor = {};
+ descriptor.mappedAtCreation = true;
+ descriptor.usage = wgpu::BufferUsage::CopySrc;
descriptor.size = bytesPerRow * (GetHeight() / GetFormat().blockHeight);
if (descriptor.size > std::numeric_limits<uint32_t>::max()) {
return DAWN_OUT_OF_MEMORY_ERROR("Unable to allocate buffer.");
}
- descriptor.nextInChain = nullptr;
- descriptor.usage = wgpu::BufferUsage::CopySrc | wgpu::BufferUsage::MapWrite;
+
// TODO(natlee@microsoft.com): use Dynamic Uploader here for temp buffer
- Ref<Buffer> srcBuffer = ToBackend(device->CreateBuffer(&descriptor));
- // Call release here to prevent memory leak since CreateBuffer will up the ref count to
- // 1, then assigning to Ref<Buffer> ups the ref count to 2. Release will reduce the ref
- // count and ensure it to reach 0 when out of use.
- srcBuffer->Release();
+ Ref<Buffer> srcBuffer = AcquireRef(ToBackend(device->CreateBuffer(&descriptor)));
// Fill the buffer with clear color
- uint8_t* clearBuffer = nullptr;
- DAWN_TRY(srcBuffer->MapAtCreation(&clearBuffer));
- memset(clearBuffer, clearColor, descriptor.size);
+ memset(srcBuffer->GetMappedRange(), clearColor, descriptor.size);
srcBuffer->Unmap();
// Bind buffer and texture, and make the buffer to texture copy
diff --git a/src/dawn_native/vulkan/BufferVk.cpp b/src/dawn_native/vulkan/BufferVk.cpp
index 37758cf..67ce1ef 100644
--- a/src/dawn_native/vulkan/BufferVk.cpp
+++ b/src/dawn_native/vulkan/BufferVk.cpp
@@ -239,8 +239,7 @@
return mMemoryAllocation.GetMappedPointer() != nullptr;
}
- MaybeError Buffer::MapAtCreationImpl(uint8_t** mappedPointer) {
- *mappedPointer = mMemoryAllocation.GetMappedPointer();
+ MaybeError Buffer::MapAtCreationImpl() {
return {};
}
diff --git a/src/dawn_native/vulkan/BufferVk.h b/src/dawn_native/vulkan/BufferVk.h
index feb64b4..1f68273 100644
--- a/src/dawn_native/vulkan/BufferVk.h
+++ b/src/dawn_native/vulkan/BufferVk.h
@@ -57,7 +57,7 @@
void DestroyImpl() override;
bool IsMapWritable() const override;
- MaybeError MapAtCreationImpl(uint8_t** mappedPointer) override;
+ MaybeError MapAtCreationImpl() override;
void* GetMappedPointerImpl() override;
VkBuffer mHandle = VK_NULL_HANDLE;
diff --git a/src/dawn_wire/client/Device.cpp b/src/dawn_wire/client/Device.cpp
index ce8aec45..411fd5c 100644
--- a/src/dawn_wire/client/Device.cpp
+++ b/src/dawn_wire/client/Device.cpp
@@ -146,7 +146,11 @@
}
WGPUBuffer Device::CreateBuffer(const WGPUBufferDescriptor* descriptor) {
- return Buffer::Create(this, descriptor);
+ if (descriptor->mappedAtCreation) {
+ return CreateBufferMapped(descriptor).buffer;
+ } else {
+ return Buffer::Create(this, descriptor);
+ }
}
WGPUCreateBufferMappedResult Device::CreateBufferMapped(
diff --git a/src/tests/end2end/BufferTests.cpp b/src/tests/end2end/BufferTests.cpp
index 7163051..f010be6 100644
--- a/src/tests/end2end/BufferTests.cpp
+++ b/src/tests/end2end/BufferTests.cpp
@@ -309,7 +309,6 @@
DAWN_INSTANTIATE_TEST(BufferMapWriteTests, D3D12Backend(), MetalBackend(), OpenGLBackend(), VulkanBackend());
-// TODO(enga): These tests should use the testing toggle to initialize resources to 1.
class CreateBufferMappedTests : public DawnTest {
protected:
static void MapReadCallback(WGPUBufferMapAsyncStatus status,
@@ -594,6 +593,273 @@
OpenGLBackend(),
VulkanBackend());
+class BufferMappedAtCreationTests : public DawnTest {
+ protected:
+ static void MapReadCallback(WGPUBufferMapAsyncStatus status,
+ const void* data,
+ uint64_t,
+ void* userdata) {
+ ASSERT_EQ(WGPUBufferMapAsyncStatus_Success, status);
+ ASSERT_NE(nullptr, data);
+
+ static_cast<BufferMappedAtCreationTests*>(userdata)->mappedData = data;
+ }
+
+ const void* MapReadAsyncAndWait(const wgpu::Buffer& buffer) {
+ buffer.MapReadAsync(MapReadCallback, this);
+
+ while (mappedData == nullptr) {
+ WaitABit();
+ }
+
+ return mappedData;
+ }
+
+ void UnmapBuffer(const wgpu::Buffer& buffer) {
+ buffer.Unmap();
+ mappedData = nullptr;
+ }
+
+ wgpu::Buffer BufferMappedAtCreation(wgpu::BufferUsage usage, uint64_t size) {
+ wgpu::BufferDescriptor descriptor;
+ descriptor.size = size;
+ descriptor.usage = usage;
+ descriptor.mappedAtCreation = true;
+ return device.CreateBuffer(&descriptor);
+ }
+
+ wgpu::Buffer BufferMappedAtCreationWithData(wgpu::BufferUsage usage,
+ const std::vector<uint32_t>& data) {
+ size_t byteLength = data.size() * sizeof(uint32_t);
+ wgpu::Buffer buffer = BufferMappedAtCreation(usage, byteLength);
+ memcpy(buffer.GetMappedRange(), data.data(), byteLength);
+ return buffer;
+ }
+
+ private:
+ const void* mappedData = nullptr;
+};
+
+// Test that the simplest mappedAtCreation works for MapWrite buffers.
+TEST_P(BufferMappedAtCreationTests, MapWriteUsageSmall) {
+ uint32_t myData = 230502;
+ wgpu::Buffer buffer = BufferMappedAtCreationWithData(
+ wgpu::BufferUsage::MapWrite | wgpu::BufferUsage::CopySrc, {myData});
+ UnmapBuffer(buffer);
+ EXPECT_BUFFER_U32_EQ(myData, buffer, 0);
+}
+
+// Test that the simplest mappedAtCreation works for MapRead buffers.
+TEST_P(BufferMappedAtCreationTests, MapReadUsageSmall) {
+ uint32_t myData = 230502;
+ wgpu::Buffer buffer = BufferMappedAtCreationWithData(wgpu::BufferUsage::MapRead, {myData});
+ UnmapBuffer(buffer);
+
+ const void* mappedData = MapReadAsyncAndWait(buffer);
+ ASSERT_EQ(myData, *reinterpret_cast<const uint32_t*>(mappedData));
+ UnmapBuffer(buffer);
+}
+
+// Test that the simplest mappedAtCreation works for non-mappable buffers.
+TEST_P(BufferMappedAtCreationTests, NonMappableUsageSmall) {
+ uint32_t myData = 4239;
+ wgpu::Buffer buffer = BufferMappedAtCreationWithData(wgpu::BufferUsage::CopySrc, {myData});
+ UnmapBuffer(buffer);
+
+ EXPECT_BUFFER_U32_EQ(myData, buffer, 0);
+}
+
+// Test mappedAtCreation for a large MapWrite buffer
+TEST_P(BufferMappedAtCreationTests, MapWriteUsageLarge) {
+ constexpr uint64_t kDataSize = 1000 * 1000;
+ std::vector<uint32_t> myData;
+ for (uint32_t i = 0; i < kDataSize; ++i) {
+ myData.push_back(i);
+ }
+
+ wgpu::Buffer buffer = BufferMappedAtCreationWithData(
+ wgpu::BufferUsage::MapWrite | wgpu::BufferUsage::CopySrc, {myData});
+ UnmapBuffer(buffer);
+
+ EXPECT_BUFFER_U32_RANGE_EQ(myData.data(), buffer, 0, kDataSize);
+}
+
+// Test mappedAtCreation for a large MapRead buffer
+TEST_P(BufferMappedAtCreationTests, MapReadUsageLarge) {
+ constexpr uint64_t kDataSize = 1000 * 1000;
+ std::vector<uint32_t> myData;
+ for (uint32_t i = 0; i < kDataSize; ++i) {
+ myData.push_back(i);
+ }
+
+ wgpu::Buffer buffer = BufferMappedAtCreationWithData(wgpu::BufferUsage::MapRead, myData);
+ UnmapBuffer(buffer);
+
+ const void* mappedData = MapReadAsyncAndWait(buffer);
+ ASSERT_EQ(0, memcmp(mappedData, myData.data(), kDataSize * sizeof(uint32_t)));
+ UnmapBuffer(buffer);
+}
+
+// Test mappedAtCreation for a large non-mappable buffer
+TEST_P(BufferMappedAtCreationTests, NonMappableUsageLarge) {
+ constexpr uint64_t kDataSize = 1000 * 1000;
+ std::vector<uint32_t> myData;
+ for (uint32_t i = 0; i < kDataSize; ++i) {
+ myData.push_back(i);
+ }
+
+ wgpu::Buffer buffer = BufferMappedAtCreationWithData(wgpu::BufferUsage::CopySrc, {myData});
+ UnmapBuffer(buffer);
+
+ EXPECT_BUFFER_U32_RANGE_EQ(myData.data(), buffer, 0, kDataSize);
+}
+
+// Test destroying a non-mappable buffer mapped at creation.
+// This is a regression test for an issue where the D3D12 backend thought the buffer was actually
+// mapped and tried to unlock the heap residency (when actually the buffer was using a staging
+// buffer)
+TEST_P(BufferMappedAtCreationTests, DestroyNonMappableWhileMappedForCreation) {
+ wgpu::Buffer buffer = BufferMappedAtCreation(wgpu::BufferUsage::CopySrc, 4);
+ buffer.Destroy();
+}
+
+// Test destroying a mappable buffer mapped at creation.
+TEST_P(BufferMappedAtCreationTests, DestroyMappableWhileMappedForCreation) {
+ wgpu::Buffer buffer = BufferMappedAtCreation(wgpu::BufferUsage::MapRead, 4);
+ buffer.Destroy();
+}
+
+// Test that mapping a buffer is valid after mappedAtCreation and Unmap
+TEST_P(BufferMappedAtCreationTests, CreateThenMapSuccess) {
+ static uint32_t myData = 230502;
+ static uint32_t myData2 = 1337;
+ wgpu::Buffer buffer = BufferMappedAtCreationWithData(
+ wgpu::BufferUsage::MapWrite | wgpu::BufferUsage::CopySrc, {myData});
+ UnmapBuffer(buffer);
+
+ EXPECT_BUFFER_U32_EQ(myData, buffer, 0);
+
+ bool done = false;
+ buffer.MapWriteAsync(
+ [](WGPUBufferMapAsyncStatus status, void* data, uint64_t, void* userdata) {
+ ASSERT_EQ(WGPUBufferMapAsyncStatus_Success, status);
+ ASSERT_NE(nullptr, data);
+
+ *static_cast<uint32_t*>(data) = myData2;
+ *static_cast<bool*>(userdata) = true;
+ },
+ &done);
+
+ while (!done) {
+ WaitABit();
+ }
+
+ UnmapBuffer(buffer);
+ EXPECT_BUFFER_U32_EQ(myData2, buffer, 0);
+}
+
+// Test that it is invalid to map a buffer twice when using mappedAtCreation
+TEST_P(BufferMappedAtCreationTests, CreateThenMapBeforeUnmapFailure) {
+ uint32_t myData = 230502;
+ wgpu::Buffer buffer = BufferMappedAtCreationWithData(
+ wgpu::BufferUsage::MapWrite | wgpu::BufferUsage::CopySrc, {myData});
+
+ ASSERT_DEVICE_ERROR([&]() {
+ bool done = false;
+ buffer.MapWriteAsync(
+ [](WGPUBufferMapAsyncStatus status, void* data, uint64_t, void* userdata) {
+ ASSERT_EQ(WGPUBufferMapAsyncStatus_Error, status);
+ ASSERT_EQ(nullptr, data);
+
+ *static_cast<bool*>(userdata) = true;
+ },
+ &done);
+
+ while (!done) {
+ WaitABit();
+ }
+ }());
+
+ // mappedAtCreation is unaffected by the MapWrite error.
+ UnmapBuffer(buffer);
+ EXPECT_BUFFER_U32_EQ(myData, buffer, 0);
+}
+
+// Test that creating a zero-sized buffer mapped is allowed.
+TEST_P(BufferMappedAtCreationTests, ZeroSized) {
+ wgpu::BufferDescriptor descriptor;
+ descriptor.size = 0;
+ descriptor.usage = wgpu::BufferUsage::Vertex;
+ descriptor.mappedAtCreation = true;
+ wgpu::Buffer buffer = device.CreateBuffer(&descriptor);
+
+ ASSERT_NE(nullptr, buffer.GetMappedRange());
+
+ // Check that unmapping the buffer works too.
+ UnmapBuffer(buffer);
+}
+
+// Test that creating a zero-sized mappable buffer mapped at creation works. (It is a different code path.)
+TEST_P(BufferMappedAtCreationTests, ZeroSizedMappableBuffer) {
+ wgpu::BufferDescriptor descriptor;
+ descriptor.size = 0;
+ descriptor.usage = wgpu::BufferUsage::MapWrite;
+ descriptor.mappedAtCreation = true;
+ wgpu::Buffer buffer = device.CreateBuffer(&descriptor);
+
+ ASSERT_NE(nullptr, buffer.GetMappedRange());
+
+ // Check that unmapping the buffer works too.
+ UnmapBuffer(buffer);
+}
+
+// Test that creating a zero-sized error buffer mapped at creation works. (It is a different code path.)
+TEST_P(BufferMappedAtCreationTests, ZeroSizedErrorBuffer) {
+ DAWN_SKIP_TEST_IF(IsDawnValidationSkipped());
+
+ wgpu::BufferDescriptor descriptor;
+ descriptor.size = 0;
+ descriptor.usage = wgpu::BufferUsage::MapWrite | wgpu::BufferUsage::Storage;
+ descriptor.mappedAtCreation = true;
+ wgpu::Buffer buffer;
+ ASSERT_DEVICE_ERROR(buffer = device.CreateBuffer(&descriptor));
+
+ ASSERT_NE(nullptr, buffer.GetMappedRange());
+}
+
+// Test the result of GetMappedRange when mapped at creation.
+TEST_P(BufferMappedAtCreationTests, GetMappedRange) {
+ wgpu::BufferDescriptor descriptor;
+ descriptor.size = 4;
+ descriptor.usage = wgpu::BufferUsage::CopyDst;
+ descriptor.mappedAtCreation = true;
+ wgpu::Buffer buffer = device.CreateBuffer(&descriptor);
+
+ ASSERT_EQ(buffer.GetMappedRange(), buffer.GetConstMappedRange());
+ ASSERT_NE(buffer.GetMappedRange(), nullptr);
+ buffer.Unmap();
+}
+
+// Test the result of GetMappedRange when mapped at creation for a zero-sized buffer.
+TEST_P(BufferMappedAtCreationTests, GetMappedRangeZeroSized) {
+ wgpu::BufferDescriptor descriptor;
+ descriptor.size = 0;
+ descriptor.usage = wgpu::BufferUsage::CopyDst;
+ descriptor.mappedAtCreation = true;
+ wgpu::Buffer buffer = device.CreateBuffer(&descriptor);
+
+ ASSERT_EQ(buffer.GetMappedRange(), buffer.GetConstMappedRange());
+ ASSERT_NE(buffer.GetMappedRange(), nullptr);
+ buffer.Unmap();
+}
+
+DAWN_INSTANTIATE_TEST(BufferMappedAtCreationTests,
+ D3D12Backend(),
+ D3D12Backend({}, {"use_d3d12_resource_heap_tier2"}),
+ MetalBackend(),
+ OpenGLBackend(),
+ VulkanBackend());
+
class BufferTests : public DawnTest {};
// Test that creating a zero-buffer is allowed.
@@ -664,6 +930,51 @@
}
}
+// Test that a very large buffer mappedAtCreation fails gracefully.
+TEST_P(BufferTests, BufferMappedAtCreationOOM) {
+ // TODO(http://crbug.com/dawn/27): Missing support.
+ DAWN_SKIP_TEST_IF(IsOpenGL());
+ DAWN_SKIP_TEST_IF(IsAsan());
+
+ // Test non-mappable buffer
+ {
+ wgpu::BufferDescriptor descriptor;
+ descriptor.size = 4;
+ descriptor.usage = wgpu::BufferUsage::CopyDst;
+ descriptor.mappedAtCreation = true;
+
+ // Control: test a small buffer works.
+ device.CreateBuffer(&descriptor);
+
+ // Test an enormous buffer fails
+ descriptor.size = std::numeric_limits<uint64_t>::max();
+ ASSERT_DEVICE_ERROR(device.CreateBuffer(&descriptor));
+
+ // UINT64_MAX may be special cased. Test a smaller, but really large buffer also fails
+ descriptor.size = 1ull << 50;
+ ASSERT_DEVICE_ERROR(device.CreateBuffer(&descriptor));
+ }
+
+ // Test mappable buffer
+ {
+ wgpu::BufferDescriptor descriptor;
+ descriptor.size = 4;
+ descriptor.usage = wgpu::BufferUsage::CopySrc | wgpu::BufferUsage::MapWrite;
+ descriptor.mappedAtCreation = true;
+
+ // Control: test a small buffer works.
+ device.CreateBuffer(&descriptor);
+
+ // Test an enormous buffer fails
+ descriptor.size = std::numeric_limits<uint64_t>::max();
+ ASSERT_DEVICE_ERROR(device.CreateBuffer(&descriptor));
+
+ // UINT64_MAX may be special cased. Test a smaller, but really large buffer also fails
+ descriptor.size = 1ull << 50;
+ ASSERT_DEVICE_ERROR(device.CreateBuffer(&descriptor));
+ }
+}
+
// Test that mapping an OOM buffer for reading fails gracefully
TEST_P(BufferTests, CreateBufferOOMMapReadAsync) {
// TODO(http://crbug.com/dawn/27): Missing support.
diff --git a/src/tests/end2end/DeviceLostTests.cpp b/src/tests/end2end/DeviceLostTests.cpp
index 40d8fe4..651252d 100644
--- a/src/tests/end2end/DeviceLostTests.cpp
+++ b/src/tests/end2end/DeviceLostTests.cpp
@@ -295,6 +295,17 @@
ASSERT_DEVICE_ERROR(device.CreateBufferMapped(&bufferDescriptor));
}
+// Test that mappedAtCreation fails after device is lost
+TEST_P(DeviceLostTest, CreateBufferMappedAtCreationFails) {
+ wgpu::BufferDescriptor bufferDescriptor;
+ bufferDescriptor.size = sizeof(float);
+ bufferDescriptor.usage = wgpu::BufferUsage::MapWrite;
+ bufferDescriptor.mappedAtCreation = true;
+
+ SetCallbackAndLoseForTesting();
+ ASSERT_DEVICE_ERROR(device.CreateBuffer(&bufferDescriptor));
+}
+
// Test that BufferMapReadAsync fails after device is lost
TEST_P(DeviceLostTest, BufferMapReadAsyncFails) {
wgpu::BufferDescriptor bufferDescriptor;
@@ -333,9 +344,7 @@
}
// Test it's possible to GetMappedRange on a buffer created mapped after device loss
-// TODO(cwallez@chromium.org): enable after CreateBufferMapped is implemented in terms of
-// mappedAtCreation.
-TEST_P(DeviceLostTest, DISABLED_GetMappedRange_CreateBufferMappedAfterLoss) {
+TEST_P(DeviceLostTest, GetMappedRange_CreateBufferMappedAfterLoss) {
SetCallbackAndLoseForTesting();
wgpu::BufferDescriptor desc;
@@ -362,6 +371,34 @@
ASSERT_EQ(result.buffer.GetMappedRange(), result.data);
}
+// Test it's possible to GetMappedRange on a buffer created mapped after device loss
+TEST_P(DeviceLostTest, GetMappedRange_CreateBufferMappedAtCreationAfterLoss) {
+ SetCallbackAndLoseForTesting();
+
+ wgpu::BufferDescriptor desc;
+ desc.size = 4;
+ desc.usage = wgpu::BufferUsage::CopySrc;
+ desc.mappedAtCreation = true;
+ ASSERT_DEVICE_ERROR(wgpu::Buffer buffer = device.CreateBuffer(&desc));
+
+ ASSERT_NE(buffer.GetMappedRange(), nullptr);
+}
+
+// Test that device loss doesn't change the result of GetMappedRange, mappedAtCreation version.
+TEST_P(DeviceLostTest, GetMappedRange_CreateBufferMappedAtCreationBeforeLoss) {
+ wgpu::BufferDescriptor desc;
+ desc.size = 4;
+ desc.usage = wgpu::BufferUsage::CopySrc;
+ desc.mappedAtCreation = true;
+ wgpu::Buffer buffer = device.CreateBuffer(&desc);
+
+ void* rangeBeforeLoss = buffer.GetMappedRange();
+ SetCallbackAndLoseForTesting();
+
+ ASSERT_NE(buffer.GetMappedRange(), nullptr);
+ ASSERT_EQ(buffer.GetMappedRange(), rangeBeforeLoss);
+}
+
// Test that device loss doesn't change the result of GetMappedRange, mapReadAsync version.
TEST_P(DeviceLostTest, GetMappedRange_MapReadAsync) {
wgpu::BufferDescriptor desc;
diff --git a/src/tests/unittests/validation/BufferValidationTests.cpp b/src/tests/unittests/validation/BufferValidationTests.cpp
index eedd18b..82d5fba 100644
--- a/src/tests/unittests/validation/BufferValidationTests.cpp
+++ b/src/tests/unittests/validation/BufferValidationTests.cpp
@@ -83,6 +83,15 @@
return device.CreateBufferMapped(&descriptor);
}
+ wgpu::Buffer BufferMappedAtCreation(uint64_t size, wgpu::BufferUsage usage) {
+ wgpu::BufferDescriptor descriptor;
+ descriptor.size = size;
+ descriptor.usage = usage;
+ descriptor.mappedAtCreation = true;
+
+ return device.CreateBuffer(&descriptor);
+ }
+
wgpu::Queue queue;
private:
@@ -198,6 +207,21 @@
result.buffer.Unmap();
}
+// Test the success case for mappedAtCreation
+TEST_F(BufferValidationTest, MappedAtCreationSuccess) {
+ BufferMappedAtCreation(4, wgpu::BufferUsage::MapWrite);
+}
+
+// Test the success case for mappedAtCreation for a non-mappable usage
+TEST_F(BufferValidationTest, NonMappableMappedAtCreationSuccess) {
+ BufferMappedAtCreation(4, wgpu::BufferUsage::CopySrc);
+}
+
+// Test there is an error when mappedAtCreation is set but the size isn't aligned to 4.
+TEST_F(BufferValidationTest, MappedAtCreationSizeAlignment) {
+ ASSERT_DEVICE_ERROR(BufferMappedAtCreation(2, wgpu::BufferUsage::MapWrite));
+}
+
// Test map reading a buffer with wrong current usage
TEST_F(BufferValidationTest, MapReadWrongUsage) {
wgpu::BufferDescriptor descriptor;
@@ -537,6 +561,20 @@
}
}
+// Test that it is invalid to map a buffer mapped at creation.
+TEST_F(BufferValidationTest, MapBufferMappedAtCreation) {
+ {
+ wgpu::Buffer buf = BufferMappedAtCreation(4, wgpu::BufferUsage::MapRead);
+ ASSERT_DEVICE_ERROR(buf.MapReadAsync(ToMockBufferMapReadCallback, nullptr));
+ queue.Submit(0, nullptr);
+ }
+ {
+ wgpu::Buffer buf = BufferMappedAtCreation(4, wgpu::BufferUsage::MapWrite);
+ ASSERT_DEVICE_ERROR(buf.MapWriteAsync(ToMockBufferMapWriteCallback, nullptr));
+ queue.Submit(0, nullptr);
+ }
+}
+
// Test that it is valid to submit a buffer in a queue with a map usage if it is unmapped
TEST_F(BufferValidationTest, SubmitBufferWithMapUsage) {
wgpu::BufferDescriptor descriptorA;
@@ -609,6 +647,30 @@
ASSERT_DEVICE_ERROR(queue.Submit(1, &commands));
queue.Submit(0, nullptr);
}
+ {
+ wgpu::BufferDescriptor mappedBufferDesc = descriptorA;
+ mappedBufferDesc.mappedAtCreation = true;
+ wgpu::Buffer bufA = device.CreateBuffer(&mappedBufferDesc);
+ wgpu::Buffer bufB = device.CreateBuffer(&descriptorB);
+
+ wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+ encoder.CopyBufferToBuffer(bufA, 0, bufB, 0, 4);
+ wgpu::CommandBuffer commands = encoder.Finish();
+ ASSERT_DEVICE_ERROR(queue.Submit(1, &commands));
+ queue.Submit(0, nullptr);
+ }
+ {
+ wgpu::BufferDescriptor mappedBufferDesc = descriptorB;
+ mappedBufferDesc.mappedAtCreation = true;
+ wgpu::Buffer bufA = device.CreateBuffer(&descriptorA);
+ wgpu::Buffer bufB = device.CreateBuffer(&mappedBufferDesc);
+
+ wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+ encoder.CopyBufferToBuffer(bufA, 0, bufB, 0, 4);
+ wgpu::CommandBuffer commands = encoder.Finish();
+ ASSERT_DEVICE_ERROR(queue.Submit(1, &commands));
+ queue.Submit(0, nullptr);
+ }
}
// Test that it is invalid to submit a destroyed buffer in a queue
@@ -685,6 +747,15 @@
ASSERT_EQ(nullptr, buf.GetConstMappedRange());
}
+ // Unmapped after mappedAtCreation case.
+ {
+ wgpu::Buffer buf = BufferMappedAtCreation(4, wgpu::BufferUsage::CopySrc);
+ buf.Unmap();
+
+ ASSERT_EQ(nullptr, buf.GetMappedRange());
+ ASSERT_EQ(nullptr, buf.GetConstMappedRange());
+ }
+
// Unmapped after MapReadAsync case.
{
wgpu::Buffer buf = CreateMapReadBuffer(4);
@@ -738,6 +809,15 @@
ASSERT_EQ(nullptr, buf.GetConstMappedRange());
}
+ // Destroyed after mappedAtCreation case.
+ {
+ wgpu::Buffer buf = BufferMappedAtCreation(4, wgpu::BufferUsage::CopySrc);
+ buf.Destroy();
+
+ ASSERT_EQ(nullptr, buf.GetMappedRange());
+ ASSERT_EQ(nullptr, buf.GetConstMappedRange());
+ }
+
// Destroyed after MapReadAsync case.
{
wgpu::Buffer buf = CreateMapReadBuffer(4);
@@ -791,6 +871,13 @@
ASSERT_EQ(result.buffer.GetConstMappedRange(), result.data);
}
+ // GetMappedRange after mappedAtCreation case.
+ {
+ wgpu::Buffer buffer = BufferMappedAtCreation(4, wgpu::BufferUsage::CopySrc);
+ ASSERT_NE(buffer.GetConstMappedRange(), nullptr);
+ ASSERT_EQ(buffer.GetConstMappedRange(), buffer.GetMappedRange());
+ }
+
// GetMappedRange after MapReadAsync case.
{
wgpu::Buffer buf = CreateMapReadBuffer(4);
@@ -827,13 +914,25 @@
}
// Test valid cases to call GetMappedRange on an error buffer.
-// TODO(cwallez@chromium.org): enable after CreateBufferMapped is implemented in terms of
-// mappedAtCreation.
-TEST_F(BufferValidationTest, DISABLED_GetMappedRangeOnErrorBuffer) {
+TEST_F(BufferValidationTest, GetMappedRangeOnErrorBuffer) {
wgpu::BufferDescriptor desc;
desc.size = 4;
desc.usage = wgpu::BufferUsage::Storage | wgpu::BufferUsage::MapRead;
+ uint64_t kStupidLarge = uint64_t(1) << uint64_t(63);
+
+ // GetMappedRange after CreateBufferMapped of a zero-sized buffer returns a non-nullptr.
+ // This is to check we don't do a malloc(0).
+ {
+ wgpu::CreateBufferMappedResult result;
+ ASSERT_DEVICE_ERROR(result = CreateBufferMapped(
+ 0, wgpu::BufferUsage::Storage | wgpu::BufferUsage::MapRead));
+
+ ASSERT_NE(result.buffer.GetConstMappedRange(), nullptr);
+ ASSERT_EQ(result.buffer.GetConstMappedRange(), result.buffer.GetMappedRange());
+ ASSERT_EQ(result.buffer.GetConstMappedRange(), result.data);
+ }
+
// GetMappedRange after CreateBufferMapped non-OOM returns a non-nullptr.
{
wgpu::CreateBufferMappedResult result;
@@ -848,11 +947,44 @@
// GetMappedRange after CreateBufferMapped OOM case returns nullptr.
{
wgpu::CreateBufferMappedResult result;
- ASSERT_DEVICE_ERROR(result = CreateBufferMapped(
- 1 << 31, wgpu::BufferUsage::Storage | wgpu::BufferUsage::MapRead));
+ ASSERT_DEVICE_ERROR(result =
+ CreateBufferMapped(kStupidLarge, wgpu::BufferUsage::Storage |
+ wgpu::BufferUsage::MapRead));
ASSERT_EQ(result.buffer.GetConstMappedRange(), nullptr);
ASSERT_EQ(result.buffer.GetConstMappedRange(), result.buffer.GetMappedRange());
ASSERT_EQ(result.buffer.GetConstMappedRange(), result.data);
}
+
+ // GetMappedRange after mappedAtCreation of a zero-sized buffer returns a non-nullptr.
+ // This is to check we don't do a malloc(0).
+ {
+ wgpu::Buffer buffer;
+ ASSERT_DEVICE_ERROR(buffer = BufferMappedAtCreation(
+ 0, wgpu::BufferUsage::Storage | wgpu::BufferUsage::MapRead));
+
+ ASSERT_NE(buffer.GetConstMappedRange(), nullptr);
+ ASSERT_EQ(buffer.GetConstMappedRange(), buffer.GetMappedRange());
+ }
+
+ // GetMappedRange after mappedAtCreation non-OOM returns a non-nullptr.
+ {
+ wgpu::Buffer buffer;
+ ASSERT_DEVICE_ERROR(buffer = BufferMappedAtCreation(
+ 4, wgpu::BufferUsage::Storage | wgpu::BufferUsage::MapRead));
+
+ ASSERT_NE(buffer.GetConstMappedRange(), nullptr);
+ ASSERT_EQ(buffer.GetConstMappedRange(), buffer.GetMappedRange());
+ }
+
+ // GetMappedRange after mappedAtCreation OOM case returns nullptr.
+ {
+ wgpu::Buffer buffer;
+ ASSERT_DEVICE_ERROR(
+ buffer = BufferMappedAtCreation(
+ kStupidLarge, wgpu::BufferUsage::Storage | wgpu::BufferUsage::MapRead));
+
+ ASSERT_EQ(buffer.GetConstMappedRange(), nullptr);
+ ASSERT_EQ(buffer.GetConstMappedRange(), buffer.GetMappedRange());
+ }
}
diff --git a/src/tests/unittests/validation/QueueSubmitValidationTests.cpp b/src/tests/unittests/validation/QueueSubmitValidationTests.cpp
index a74dd7e..f94bc85 100644
--- a/src/tests/unittests/validation/QueueSubmitValidationTests.cpp
+++ b/src/tests/unittests/validation/QueueSubmitValidationTests.cpp
@@ -162,6 +162,18 @@
ASSERT_DEVICE_ERROR(queue.WriteBuffer(result.buffer, 0, &value, sizeof(value)));
}
+ // mappedAtCreation
+ {
+ wgpu::BufferDescriptor descriptor;
+ descriptor.size = 4;
+ descriptor.usage = wgpu::BufferUsage::CopyDst;
+ descriptor.mappedAtCreation = true;
+ wgpu::Buffer buffer = device.CreateBuffer(&descriptor);
+
+ uint32_t value = 0;
+ ASSERT_DEVICE_ERROR(queue.WriteBuffer(buffer, 0, &value, sizeof(value)));
+ }
+
// MapReadAsync
{
wgpu::BufferDescriptor descriptor;