SharedTextureMemory: support concurrent read access
This CL allows concurrent read access with the same
wgpu::Device for the Metal and D3D11 backends.
Bug: dawn:2276
Change-Id: I001407a0a95aad2e2c16b3cc2b9875925d5f2ff7
Reviewed-on: https://dawn-review.googlesource.com/c/dawn/+/168240
Commit-Queue: Peng Huang <penghuang@google.com>
Commit-Queue: Peng Huang <penghuang@chromium.org>
Reviewed-by: Austin Eng <enga@chromium.org>
Kokoro: Kokoro <noreply+kokoro@google.com>
diff --git a/src/dawn/dawn.json b/src/dawn/dawn.json
index a60df8e..73424db 100644
--- a/src/dawn/dawn.json
+++ b/src/dawn/dawn.json
@@ -1750,6 +1750,7 @@
"extensible": "in",
"tags": ["dawn", "native"],
"members": [
+ {"name": "concurrent read", "type": "bool"},
{"name": "initialized", "type": "bool"},
{"name": "fence count", "type": "size_t"},
{"name": "fences", "type": "shared fence", "annotation": "const*", "length": "fence count"},
diff --git a/src/dawn/native/SharedTextureMemory.cpp b/src/dawn/native/SharedTextureMemory.cpp
index 5deed5c..6a632b0 100644
--- a/src/dawn/native/SharedTextureMemory.cpp
+++ b/src/dawn/native/SharedTextureMemory.cpp
@@ -106,6 +106,18 @@
void SharedTextureMemoryBase::DestroyImpl() {}
+bool SharedTextureMemoryBase::HasWriteAccess() const {
+ return mHasWriteAccess;
+}
+
+bool SharedTextureMemoryBase::HasExclusiveReadAccess() const {
+ return mHasExclusiveReadAccess;
+}
+
+int SharedTextureMemoryBase::GetReadAccessCount() const {
+ return mReadAccessCount;
+}
+
void SharedTextureMemoryBase::Initialize() {
DAWN_ASSERT(!IsError());
mContents = CreateContents();
@@ -209,22 +221,11 @@
bool SharedTextureMemoryBase::APIBeginAccess(TextureBase* texture,
const BeginAccessDescriptor* descriptor) {
- bool didBegin = false;
- DAWN_UNUSED(GetDevice()->ConsumedError(
- [&]() -> MaybeError {
- // Validate there is not another ongoing access and then set the current access.
- // This is done first because BeginAccess should acquire access regardless of whether or
- // not the internals generate an error.
- DAWN_INVALID_IF(mCurrentAccess != nullptr,
- "Cannot begin access with %s on %s which is currently accessed by %s.",
- texture, this, mCurrentAccess.Get());
- mCurrentAccess = texture;
- didBegin = true;
-
- return BeginAccess(texture, descriptor);
- }(),
- "calling %s.BeginAccess(%s).", this, texture));
- return didBegin;
+ if (GetDevice()->ConsumedError(BeginAccess(texture, descriptor), "calling %s.BeginAccess(%s).",
+ this, texture)) {
+ return false;
+ }
+ return true;
}
bool SharedTextureMemoryBase::APIIsDeviceLost() {
@@ -233,17 +234,12 @@
MaybeError SharedTextureMemoryBase::BeginAccess(TextureBase* texture,
const BeginAccessDescriptor* rawDescriptor) {
+ DAWN_TRY(GetDevice()->ValidateIsAlive());
+ DAWN_TRY(GetDevice()->ValidateObject(texture));
+
UnpackedPtr<BeginAccessDescriptor> descriptor;
DAWN_TRY_ASSIGN(descriptor, ValidateAndUnpack(rawDescriptor));
- // Append begin fences first. Fences should be tracked regardless of whether later errors occur.
- for (size_t i = 0; i < descriptor->fenceCount; ++i) {
- mContents->mPendingFences->push_back(
- {descriptor->fences[i], descriptor->signaledValues[i]});
- }
-
- DAWN_TRY(GetDevice()->ValidateIsAlive());
- DAWN_TRY(GetDevice()->ValidateObject(texture));
for (size_t i = 0; i < descriptor->fenceCount; ++i) {
DAWN_TRY(GetDevice()->ValidateObject(descriptor->fences[i]));
}
@@ -251,41 +247,86 @@
DAWN_TRY(ValidateTextureCreatedFromSelf(texture));
DAWN_INVALID_IF(texture->GetFormat().IsMultiPlanar() && !descriptor->initialized,
- "BeginAccess on %s with multiplanar format (%s) must be initialized.", texture,
+ "%s with multiplanar format (%s) must be initialized.", texture,
texture->GetFormat().format);
- DAWN_TRY(BeginAccessImpl(texture, descriptor));
- if (!texture->IsError()) {
- texture->SetHasAccess(true);
- texture->SetIsSubresourceContentInitialized(descriptor->initialized,
- texture->GetAllSubresources());
+ DAWN_INVALID_IF(texture->IsDestroyed(), "%s has been destroyed.", texture);
+ DAWN_INVALID_IF(texture->HasAccess(), "%s is already used to access %s.", texture, this);
+
+ DAWN_INVALID_IF(mHasWriteAccess, "%s is currently accessed for writing.", this);
+ DAWN_INVALID_IF(mHasExclusiveReadAccess, "%s is currently accessed for exclusive reading.",
+ this);
+
+ if (texture->IsReadOnly()) {
+ if (descriptor->concurrentRead) {
+ DAWN_INVALID_IF(!descriptor->initialized, "Concurrent reading an uninitialized %s.",
+ texture);
+ ++mReadAccessCount;
+ } else {
+ DAWN_INVALID_IF(
+ mReadAccessCount != 0,
+ "Exclusive read access used while %s is currently accessed for reading.", this);
+ mHasExclusiveReadAccess = true;
+ }
+ } else {
+ DAWN_INVALID_IF(descriptor->concurrentRead, "Concurrent reading read-write %s.", texture);
+ DAWN_INVALID_IF(mReadAccessCount != 0,
+ "Read-Write access used while %s is currently accessed for reading.", this);
+ mHasWriteAccess = true;
}
+
+ DAWN_TRY(BeginAccessImpl(texture, descriptor));
+
+ for (size_t i = 0; i < descriptor->fenceCount; ++i) {
+ mContents->mPendingFences->push_back(
+ {descriptor->fences[i], descriptor->signaledValues[i]});
+ }
+
+ DAWN_ASSERT(!texture->IsError());
+ texture->SetHasAccess(true);
+ texture->SetIsSubresourceContentInitialized(descriptor->initialized,
+ texture->GetAllSubresources());
return {};
}
bool SharedTextureMemoryBase::APIEndAccess(TextureBase* texture, EndAccessState* state) {
bool didEnd = false;
- DAWN_UNUSED(GetDevice()->ConsumedError(
- [&]() -> MaybeError {
- DAWN_INVALID_IF(mCurrentAccess != texture,
- "Cannot end access with %s on %s which is currently accessed by %s.",
- texture, this, mCurrentAccess.Get());
- mCurrentAccess = nullptr;
- didEnd = true;
-
- return EndAccess(texture, state);
- }(),
- "calling %s.EndAccess(%s).", this, texture));
+ DAWN_UNUSED(GetDevice()->ConsumedError(EndAccess(texture, state, &didEnd),
+ "calling %s.EndAccess(%s).", this, texture));
return didEnd;
}
-MaybeError SharedTextureMemoryBase::EndAccess(TextureBase* texture, EndAccessState* state) {
+MaybeError SharedTextureMemoryBase::EndAccess(TextureBase* texture,
+ EndAccessState* state,
+ bool* didEnd) {
+ DAWN_TRY(GetDevice()->ValidateObject(texture));
+ DAWN_TRY(ValidateTextureCreatedFromSelf(texture));
+
+ DAWN_INVALID_IF(!texture->HasAccess(), "%s is not currently being accessed.", texture);
+
+ if (texture->IsReadOnly()) {
+ DAWN_ASSERT(!mHasWriteAccess);
+ if (mHasExclusiveReadAccess) {
+ DAWN_ASSERT(mReadAccessCount == 0);
+ mHasExclusiveReadAccess = false;
+ } else {
+ DAWN_ASSERT(!mHasExclusiveReadAccess);
+ --mReadAccessCount;
+ }
+ } else {
+ DAWN_ASSERT(mHasWriteAccess);
+ DAWN_ASSERT(!mHasExclusiveReadAccess);
+ DAWN_ASSERT(mReadAccessCount == 0);
+ mHasWriteAccess = false;
+ }
+
PendingFenceList fenceList;
mContents->AcquirePendingFences(&fenceList);
- if (!texture->IsError()) {
- texture->SetHasAccess(false);
- }
+ DAWN_ASSERT(!texture->IsError());
+ texture->SetHasAccess(false);
+
+ *didEnd = true;
// Call the error-generating part of the EndAccess implementation. This is separated out because
// writing the output state must happen regardless of whether or not EndAccessInternal
@@ -317,16 +358,13 @@
state->fences = nullptr;
state->signaledValues = nullptr;
}
- state->initialized = texture->IsError() ||
- texture->IsSubresourceContentInitialized(texture->GetAllSubresources());
+ state->initialized = texture->IsSubresourceContentInitialized(texture->GetAllSubresources());
return err;
}
ResultOrError<FenceAndSignalValue> SharedTextureMemoryBase::EndAccessInternal(
TextureBase* texture,
EndAccessState* rawState) {
- DAWN_TRY(GetDevice()->ValidateObject(texture));
- DAWN_TRY(ValidateTextureCreatedFromSelf(texture));
UnpackedPtr<EndAccessState> state;
DAWN_TRY_ASSIGN(state, ValidateAndUnpack(rawState));
return EndAccessImpl(texture, state);
diff --git a/src/dawn/native/SharedTextureMemory.h b/src/dawn/native/SharedTextureMemory.h
index acd3a78..d5ac3df 100644
--- a/src/dawn/native/SharedTextureMemory.h
+++ b/src/dawn/native/SharedTextureMemory.h
@@ -28,9 +28,6 @@
#ifndef SRC_DAWN_NATIVE_SHAREDTEXTUREMEMORY_H_
#define SRC_DAWN_NATIVE_SHAREDTEXTUREMEMORY_H_
-#include <map>
-#include <stack>
-
#include "dawn/common/StackContainer.h"
#include "dawn/common/WeakRef.h"
#include "dawn/common/WeakRefSupport.h"
@@ -92,16 +89,16 @@
void DestroyImpl() override;
- SharedTextureMemoryProperties mProperties;
-
- Ref<TextureBase> mCurrentAccess;
+ bool HasWriteAccess() const;
+ bool HasExclusiveReadAccess() const;
+ int GetReadAccessCount() const;
private:
virtual Ref<SharedTextureMemoryContents> CreateContents();
ResultOrError<Ref<TextureBase>> CreateTexture(const TextureDescriptor* rawDescriptor);
MaybeError BeginAccess(TextureBase* texture, const BeginAccessDescriptor* rawDescriptor);
- MaybeError EndAccess(TextureBase* texture, EndAccessState* state);
+ MaybeError EndAccess(TextureBase* texture, EndAccessState* state, bool* didEnd);
ResultOrError<FenceAndSignalValue> EndAccessInternal(TextureBase* texture,
EndAccessState* rawState);
@@ -119,6 +116,10 @@
TextureBase* texture,
UnpackedPtr<EndAccessState>& state) = 0;
+ SharedTextureMemoryProperties mProperties;
+ bool mHasWriteAccess = false;
+ bool mHasExclusiveReadAccess = false;
+ int mReadAccessCount = 0;
Ref<SharedTextureMemoryContents> mContents;
};
diff --git a/src/dawn/native/Texture.cpp b/src/dawn/native/Texture.cpp
index 52f7aa5..0afa627 100644
--- a/src/dawn/native/Texture.cpp
+++ b/src/dawn/native/Texture.cpp
@@ -959,6 +959,11 @@
mState.hasAccess = hasAccess;
}
+bool TextureBase::HasAccess() const {
+ DAWN_ASSERT(!IsError());
+ return mState.hasAccess;
+}
+
uint32_t TextureBase::GetSubresourceIndex(uint32_t mipLevel,
uint32_t arraySlice,
Aspect aspect) const {
@@ -1028,6 +1033,10 @@
return mSampleCount > 1;
}
+bool TextureBase::IsReadOnly() const {
+ return IsSubset(mUsage, kReadOnlyTextureUsages);
+}
+
bool TextureBase::CoversFullSubresource(uint32_t mipLevel,
Aspect aspect,
const Extent3D& size) const {
diff --git a/src/dawn/native/Texture.h b/src/dawn/native/Texture.h
index f3acaef..60c661b 100644
--- a/src/dawn/native/Texture.h
+++ b/src/dawn/native/Texture.h
@@ -111,6 +111,7 @@
bool IsDestroyed() const;
void SetHasAccess(bool hasAccess);
+ bool HasAccess() const;
uint32_t GetSubresourceIndex(uint32_t mipLevel, uint32_t arraySlice, Aspect aspect) const;
bool IsSubresourceContentInitialized(const SubresourceRange& range) const;
void SetIsSubresourceContentInitialized(bool isInitialized, const SubresourceRange& range);
@@ -118,6 +119,7 @@
MaybeError ValidateCanUseInSubmitNow() const;
bool IsMultisampledTexture() const;
+ bool IsReadOnly() const;
// Returns true if the size covers the whole subresource.
bool CoversFullSubresource(uint32_t mipLevel, Aspect aspect, const Extent3D& size) const;
diff --git a/src/dawn/native/d3d/SharedTextureMemoryD3D.cpp b/src/dawn/native/d3d/SharedTextureMemoryD3D.cpp
index a56aebb..58e2ec1 100644
--- a/src/dawn/native/d3d/SharedTextureMemoryD3D.cpp
+++ b/src/dawn/native/d3d/SharedTextureMemoryD3D.cpp
@@ -69,7 +69,9 @@
}
}
- if (mDXGIKeyedMutex) {
+ // Acquire keyed mutex for the first access.
+ if (mDXGIKeyedMutex &&
+ (HasWriteAccess() || HasExclusiveReadAccess() || GetReadAccessCount() == 1)) {
DAWN_TRY(CheckHRESULT(mDXGIKeyedMutex->AcquireSync(kDXGIKeyedMutexAcquireKey, INFINITE),
"Acquire keyed mutex"));
}
@@ -84,7 +86,9 @@
"Required feature (%s) is missing.",
wgpu::FeatureName::SharedFenceDXGISharedHandle);
- if (mDXGIKeyedMutex) {
+ // Release keyed mutex for the last access.
+ if (mDXGIKeyedMutex && !HasWriteAccess() && !HasExclusiveReadAccess() &&
+ GetReadAccessCount() == 0) {
mDXGIKeyedMutex->ReleaseSync(kDXGIKeyedMutexAcquireKey);
}
diff --git a/src/dawn/native/d3d12/SharedTextureMemoryD3D12.cpp b/src/dawn/native/d3d12/SharedTextureMemoryD3D12.cpp
index 1aa982b..deb4bda 100644
--- a/src/dawn/native/d3d12/SharedTextureMemoryD3D12.cpp
+++ b/src/dawn/native/d3d12/SharedTextureMemoryD3D12.cpp
@@ -121,6 +121,9 @@
MaybeError SharedTextureMemory::BeginAccessImpl(
TextureBase* texture,
const UnpackedPtr<BeginAccessDescriptor>& descriptor) {
+ // TODO(dawn/2276): support concurrent read access.
+ DAWN_INVALID_IF(descriptor->concurrentRead, "D3D12 backend doesn't support concurrent read.");
+
DAWN_TRY(d3d::SharedTextureMemory::BeginAccessImpl(texture, descriptor));
// Reset state to COMMON. BeginAccess contains a list of fences to wait on after
// which the texture's usage will complete on the GPU.
diff --git a/src/dawn/native/vulkan/SharedTextureMemoryVk.cpp b/src/dawn/native/vulkan/SharedTextureMemoryVk.cpp
index f737171..de58b32 100644
--- a/src/dawn/native/vulkan/SharedTextureMemoryVk.cpp
+++ b/src/dawn/native/vulkan/SharedTextureMemoryVk.cpp
@@ -993,6 +993,9 @@
MaybeError SharedTextureMemory::BeginAccessImpl(
TextureBase* texture,
const UnpackedPtr<BeginAccessDescriptor>& descriptor) {
+ // TODO(dawn/2276): support concurrent read access.
+ DAWN_INVALID_IF(descriptor->concurrentRead, "Vulkan backend doesn't support concurrent read.");
+
wgpu::SType type;
DAWN_TRY_ASSIGN(
type, (descriptor.ValidateBranches<Branch<SharedTextureMemoryVkImageLayoutBeginState>>()));
diff --git a/src/dawn/tests/white_box/SharedTextureMemoryTests.cpp b/src/dawn/tests/white_box/SharedTextureMemoryTests.cpp
index c35bc85..bb6e696 100644
--- a/src/dawn/tests/white_box/SharedTextureMemoryTests.cpp
+++ b/src/dawn/tests/white_box/SharedTextureMemoryTests.cpp
@@ -489,6 +489,64 @@
return {encoder.Finish(), colorTarget};
}
+// Make a command buffer that averages the contents of two input textures into an RGBA8Unorm texture.
+std::pair<wgpu::CommandBuffer, wgpu::Texture>
+SharedTextureMemoryTests::MakeCheckBySamplingTwoTexturesCommandBuffer(wgpu::Texture& texture0,
+ wgpu::Texture& texture1) {
+ wgpu::ShaderModule module = utils::CreateShaderModule(device, R"(
+ @vertex fn vert_main(@builtin(vertex_index) VertexIndex : u32) -> @builtin(position) vec4f {
+ let pos = array(
+ vec2( 1.0, 1.0),
+ vec2( 1.0, -1.0),
+ vec2(-1.0, -1.0),
+ vec2( 1.0, 1.0),
+ vec2(-1.0, -1.0),
+ vec2(-1.0, 1.0),
+ );
+ return vec4f(pos[VertexIndex], 0.0, 1.0);
+ }
+
+ @group(0) @binding(0) var t0: texture_2d<f32>;
+ @group(0) @binding(1) var t1: texture_2d<f32>;
+
+ @fragment fn frag_main(@builtin(position) coord_in: vec4<f32>) -> @location(0) vec4f {
+ return (textureLoad(t0, vec2u(coord_in.xy), 0) / 2) +
+ (textureLoad(t1, vec2u(coord_in.xy), 0) / 2);
+ }
+ )");
+
+ wgpu::TextureDescriptor textureDesc = {};
+ textureDesc.format = wgpu::TextureFormat::RGBA8Unorm;
+ textureDesc.usage = wgpu::TextureUsage::RenderAttachment | wgpu::TextureUsage::CopySrc;
+ textureDesc.size = {texture0.GetWidth(), texture0.GetHeight(),
+ texture0.GetDepthOrArrayLayers()};
+ textureDesc.label = "intermediate check texture";
+
+ wgpu::Texture colorTarget = device.CreateTexture(&textureDesc);
+
+ utils::ComboRenderPipelineDescriptor pipelineDesc;
+ pipelineDesc.vertex.module = module;
+ pipelineDesc.cFragment.module = module;
+ pipelineDesc.cTargets[0].format = colorTarget.GetFormat();
+
+ wgpu::RenderPipeline pipeline = device.CreateRenderPipeline(&pipelineDesc);
+
+ wgpu::BindGroup bindGroup =
+ utils::MakeBindGroup(device, pipeline.GetBindGroupLayout(0),
+ {{0, texture0.CreateView()}, {1, texture1.CreateView()}});
+
+ wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+ utils::ComboRenderPassDescriptor passDescriptor({colorTarget.CreateView()});
+ passDescriptor.cColorAttachments[0].storeOp = wgpu::StoreOp::Store;
+
+ wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&passDescriptor);
+ pass.SetPipeline(pipeline);
+ pass.SetBindGroup(0, bindGroup);
+ pass.Draw(6);
+ pass.End();
+ return {encoder.Finish(), colorTarget};
+}
+
// Check that the contents of colorTarget are RGBA8Unorm texels that match those written by
// MakeFourColorsClearCommandBuffer.
void SharedTextureMemoryTests::CheckFourColors(wgpu::Device& deviceObj,
@@ -592,13 +650,14 @@
HasSubstr("is invalid"));
wgpu::SharedTextureMemoryBeginAccessDescriptor beginDesc = {};
+ beginDesc.concurrentRead = false;
beginDesc.initialized = true;
- ASSERT_DEVICE_ERROR_MSG(EXPECT_TRUE(memory.BeginAccess(texture, &beginDesc)),
+ ASSERT_DEVICE_ERROR_MSG(EXPECT_FALSE(memory.BeginAccess(texture, &beginDesc)),
HasSubstr("is invalid"));
wgpu::SharedTextureMemoryEndAccessState endState = {};
- ASSERT_DEVICE_ERROR_MSG(EXPECT_TRUE(memory.EndAccess(texture, &endState)),
+ ASSERT_DEVICE_ERROR_MSG(EXPECT_FALSE(memory.EndAccess(texture, &endState)),
HasSubstr("is invalid"));
}
}
@@ -894,13 +953,14 @@
wgpu::Texture texture = memory.CreateTexture();
wgpu::SharedTextureMemoryBeginAccessDescriptor beginDesc = {};
+ beginDesc.concurrentRead = false;
beginDesc.initialized = true;
auto backendBeginState = GetParam().mBackend->ChainInitialBeginState(&beginDesc);
// It should be an error to BeginAccess twice in a row.
EXPECT_TRUE(memory.BeginAccess(texture, &beginDesc));
ASSERT_DEVICE_ERROR_MSG(EXPECT_FALSE(memory.BeginAccess(texture, &beginDesc)),
- HasSubstr("Cannot begin access with"));
+ HasSubstr("is already used to access"));
}
// Test that it is an error to call BeginAccess concurrently on a write texture
@@ -912,12 +972,35 @@
wgpu::Texture readTexture = CreateReadTexture(memory);
wgpu::SharedTextureMemoryBeginAccessDescriptor beginDesc = {};
+ beginDesc.concurrentRead = false;
beginDesc.initialized = true;
auto backendBeginState = GetParam().mBackend->ChainInitialBeginState(&beginDesc);
EXPECT_TRUE(memory.BeginAccess(writeTexture, &beginDesc));
ASSERT_DEVICE_ERROR_MSG(EXPECT_FALSE(memory.BeginAccess(readTexture, &beginDesc)),
- HasSubstr("Cannot begin access with"));
+ HasSubstr("is currently accessed for writing"));
+}
+
+// Test that it is an error to call BeginAccess concurrently on a write texture
+// followed by a read texture on a single SharedTextureMemory.
+TEST_P(SharedTextureMemoryTests, DoubleBeginAccessSeparateTexturesWriteConcurrentRead) {
+ // TODO(dawn/2276): support concurrent read access.
+ DAWN_TEST_UNSUPPORTED_IF(IsD3D12() || IsVulkan());
+
+ wgpu::SharedTextureMemory memory = GetParam().mBackend->CreateSharedTextureMemory(device);
+
+ wgpu::Texture writeTexture = CreateWriteTexture(memory);
+ wgpu::Texture readTexture = CreateReadTexture(memory);
+
+ wgpu::SharedTextureMemoryBeginAccessDescriptor beginDesc = {};
+ beginDesc.concurrentRead = false;
+ beginDesc.initialized = true;
+ auto backendBeginState = GetParam().mBackend->ChainInitialBeginState(&beginDesc);
+
+ EXPECT_TRUE(memory.BeginAccess(writeTexture, &beginDesc));
+ beginDesc.concurrentRead = true;
+ ASSERT_DEVICE_ERROR_MSG(EXPECT_FALSE(memory.BeginAccess(readTexture, &beginDesc)),
+ HasSubstr("is currently accessed for writing"));
}
// Test that it is an error to call BeginAccess concurrently on a read texture
@@ -929,12 +1012,35 @@
wgpu::Texture readTexture = CreateReadTexture(memory);
wgpu::SharedTextureMemoryBeginAccessDescriptor beginDesc = {};
+ beginDesc.concurrentRead = false;
beginDesc.initialized = true;
auto backendBeginState = GetParam().mBackend->ChainInitialBeginState(&beginDesc);
EXPECT_TRUE(memory.BeginAccess(readTexture, &beginDesc));
ASSERT_DEVICE_ERROR_MSG(EXPECT_FALSE(memory.BeginAccess(writeTexture, &beginDesc)),
- HasSubstr("Cannot begin access with"));
+ HasSubstr("is currently accessed for exclusive reading"));
+}
+
+// Test that it is an error to call BeginAccess concurrently on a read texture
+// followed by a write texture on a single SharedTextureMemory.
+TEST_P(SharedTextureMemoryTests, DoubleBeginAccessSeparateTexturesConcurrentReadWrite) {
+ // TODO(dawn/2276): support concurrent read access.
+ DAWN_TEST_UNSUPPORTED_IF(IsD3D12() || IsVulkan());
+
+ wgpu::SharedTextureMemory memory = GetParam().mBackend->CreateSharedTextureMemory(device);
+
+ wgpu::Texture writeTexture = CreateWriteTexture(memory);
+ wgpu::Texture readTexture = CreateReadTexture(memory);
+
+ wgpu::SharedTextureMemoryBeginAccessDescriptor beginDesc = {};
+ beginDesc.concurrentRead = true;
+ beginDesc.initialized = true;
+ auto backendBeginState = GetParam().mBackend->ChainInitialBeginState(&beginDesc);
+
+ EXPECT_TRUE(memory.BeginAccess(readTexture, &beginDesc));
+ beginDesc.concurrentRead = false;
+ ASSERT_DEVICE_ERROR_MSG(EXPECT_FALSE(memory.BeginAccess(writeTexture, &beginDesc)),
+ HasSubstr("is currently accessed for reading."));
}
// Test that it is an error to call BeginAccess concurrently on two write textures on a single
@@ -946,18 +1052,17 @@
wgpu::Texture writeTexture2 = CreateWriteTexture(memory);
wgpu::SharedTextureMemoryBeginAccessDescriptor beginDesc = {};
+ beginDesc.concurrentRead = false;
beginDesc.initialized = true;
auto backendBeginState = GetParam().mBackend->ChainInitialBeginState(&beginDesc);
EXPECT_TRUE(memory.BeginAccess(writeTexture1, &beginDesc));
ASSERT_DEVICE_ERROR_MSG(EXPECT_FALSE(memory.BeginAccess(writeTexture2, &beginDesc)),
- HasSubstr("Cannot begin access with"));
+ HasSubstr("is currently accessed for writing"));
}
-// Test that it is an error to call BeginAccess concurrently on two read textures on a single
+// Test that it is an error to call BeginAccess on two read textures without concurrentRead on a single
// SharedTextureMemory.
-// TODO(crbug.com/dawn/2276): Support concurrent read access in
-// SharedTextureMemory and update this test.
TEST_P(SharedTextureMemoryTests, DoubleBeginAccessSeparateTexturesReadRead) {
wgpu::SharedTextureMemory memory = GetParam().mBackend->CreateSharedTextureMemory(device);
@@ -965,12 +1070,99 @@
wgpu::Texture readTexture2 = CreateReadTexture(memory);
wgpu::SharedTextureMemoryBeginAccessDescriptor beginDesc = {};
+ beginDesc.concurrentRead = false;
+ beginDesc.initialized = true;
+ auto backendBeginState = GetParam().mBackend->ChainInitialBeginState(&beginDesc);
+
+ EXPECT_TRUE(memory.BeginAccess(readTexture1, &beginDesc));
+ ASSERT_DEVICE_ERROR_MSG(EXPECT_FALSE(memory.BeginAccess(readTexture2, &beginDesc)),
+ HasSubstr("is currently accessed for exclusive reading"));
+}
+
+// Test that it is valid to call BeginAccess concurrently on two read textures on a single
+// SharedTextureMemory.
+TEST_P(SharedTextureMemoryTests, DoubleBeginAccessSeparateTexturesConcurrentReadConcurrentRead) {
+ // TODO(dawn/2276): support concurrent read access.
+ DAWN_TEST_UNSUPPORTED_IF(IsD3D12() || IsVulkan());
+
+ wgpu::SharedTextureMemory memory = GetParam().mBackend->CreateSharedTextureMemory(device);
+
+ wgpu::Texture readTexture1 = CreateReadTexture(memory);
+ wgpu::Texture readTexture2 = CreateReadTexture(memory);
+
+ wgpu::SharedTextureMemoryBeginAccessDescriptor beginDesc = {};
+ beginDesc.concurrentRead = true;
+ beginDesc.initialized = true;
+ auto backendBeginState = GetParam().mBackend->ChainInitialBeginState(&beginDesc);
+
+ EXPECT_TRUE(memory.BeginAccess(readTexture1, &beginDesc));
+ EXPECT_TRUE(memory.BeginAccess(readTexture2, &beginDesc));
+
+ wgpu::SharedTextureMemoryEndAccessState endState1 = {};
+ EXPECT_TRUE(memory.EndAccess(readTexture1, &endState1));
+ wgpu::SharedTextureMemoryEndAccessState endState2 = {};
+ EXPECT_TRUE(memory.EndAccess(readTexture2, &endState2));
+}
+
+// Test that it is an error to call an exclusive-read BeginAccess while a concurrent-read access
+// is ongoing on a single SharedTextureMemory.
+TEST_P(SharedTextureMemoryTests, DoubleBeginAccessSeparateTexturesConcurrentReadRead) {
+ // TODO(dawn/2276): support concurrent read access.
+ DAWN_TEST_UNSUPPORTED_IF(IsD3D12() || IsVulkan());
+ wgpu::SharedTextureMemory memory = GetParam().mBackend->CreateSharedTextureMemory(device);
+
+ wgpu::Texture readTexture1 = CreateReadTexture(memory);
+ wgpu::Texture readTexture2 = CreateReadTexture(memory);
+
+ wgpu::SharedTextureMemoryBeginAccessDescriptor beginDesc = {};
beginDesc.initialized = true;
auto backendBeginState = GetParam().mBackend->ChainInitialBeginState(&beginDesc);
+ beginDesc.concurrentRead = true;
EXPECT_TRUE(memory.BeginAccess(readTexture1, &beginDesc));
+ beginDesc.concurrentRead = false;
ASSERT_DEVICE_ERROR_MSG(EXPECT_FALSE(memory.BeginAccess(readTexture2, &beginDesc)),
- HasSubstr("Cannot begin access with"));
+ HasSubstr("is currently accessed for reading."));
+}
+
+// Test that it is an error to call a concurrent-read BeginAccess while an exclusive-read access
+// is ongoing on a single SharedTextureMemory.
+TEST_P(SharedTextureMemoryTests, DoubleBeginAccessSeparateTexturesReadConcurrentRead) {
+ // TODO(dawn/2276): support concurrent read access.
+ DAWN_TEST_UNSUPPORTED_IF(IsD3D12() || IsVulkan());
+ wgpu::SharedTextureMemory memory = GetParam().mBackend->CreateSharedTextureMemory(device);
+
+ wgpu::Texture readTexture1 = CreateReadTexture(memory);
+ wgpu::Texture readTexture2 = CreateReadTexture(memory);
+
+ wgpu::SharedTextureMemoryBeginAccessDescriptor beginDesc = {};
+ beginDesc.initialized = true;
+ auto backendBeginState = GetParam().mBackend->ChainInitialBeginState(&beginDesc);
+
+ beginDesc.concurrentRead = false;
+ EXPECT_TRUE(memory.BeginAccess(readTexture1, &beginDesc));
+ beginDesc.concurrentRead = true;
+ ASSERT_DEVICE_ERROR_MSG(EXPECT_FALSE(memory.BeginAccess(readTexture2, &beginDesc)),
+ HasSubstr("is currently accessed for exclusive reading."));
+}
+
+// Test that it is an error to call BeginAccess on a write texture with concurrentRead set to
+// true.
+TEST_P(SharedTextureMemoryTests, ConcurrentWrite) {
+ // TODO(dawn/2276): support concurrent read access.
+ DAWN_TEST_UNSUPPORTED_IF(IsD3D12() || IsVulkan());
+
+ wgpu::SharedTextureMemory memory = GetParam().mBackend->CreateSharedTextureMemory(device);
+
+ wgpu::Texture writeTexture = CreateWriteTexture(memory);
+
+ wgpu::SharedTextureMemoryBeginAccessDescriptor beginDesc = {};
+ beginDesc.initialized = true;
+ auto backendBeginState = GetParam().mBackend->ChainInitialBeginState(&beginDesc);
+
+ beginDesc.concurrentRead = true;
+ ASSERT_DEVICE_ERROR_MSG(EXPECT_FALSE(memory.BeginAccess(writeTexture, &beginDesc)),
+ HasSubstr("Concurrent reading read-write"));
}
// Test that it is an error to call EndAccess twice in a row on the same memory.
@@ -979,6 +1171,7 @@
wgpu::Texture texture = memory.CreateTexture();
wgpu::SharedTextureMemoryBeginAccessDescriptor beginDesc = {};
+ beginDesc.concurrentRead = false;
beginDesc.initialized = true;
auto backendBeginState = GetParam().mBackend->ChainInitialBeginState(&beginDesc);
@@ -990,7 +1183,7 @@
// Invalid to end access a second time.
ASSERT_DEVICE_ERROR_MSG(EXPECT_FALSE(memory.EndAccess(texture, &endState)),
- HasSubstr("Cannot end access"));
+ HasSubstr("is not currently being accessed"));
}
// Test that it is an error to call EndAccess on a texture that was not the one BeginAccess was
@@ -1001,6 +1194,7 @@
wgpu::Texture texture2 = memory.CreateTexture();
wgpu::SharedTextureMemoryBeginAccessDescriptor beginDesc = {};
+ beginDesc.concurrentRead = false;
beginDesc.initialized = true;
auto backendBeginState = GetParam().mBackend->ChainInitialBeginState(&beginDesc);
@@ -1009,7 +1203,7 @@
wgpu::SharedTextureMemoryEndAccessState endState = {};
auto backendEndState = GetParam().mBackend->ChainEndState(&endState);
ASSERT_DEVICE_ERROR_MSG(EXPECT_FALSE(memory.EndAccess(texture2, &endState)),
- HasSubstr("Cannot end access"));
+ HasSubstr("is not currently being accessed"));
}
// Test that it is an error to call EndAccess without a preceding BeginAccess.
@@ -1020,7 +1214,7 @@
wgpu::SharedTextureMemoryEndAccessState endState = {};
auto backendEndState = GetParam().mBackend->ChainEndState(&endState);
ASSERT_DEVICE_ERROR_MSG(EXPECT_FALSE(memory.EndAccess(texture, &endState)),
- HasSubstr("Cannot end access"));
+ HasSubstr("is not currently being accessed"));
}
// Test that it is an error to use the texture on the queue without a preceding BeginAccess.
@@ -1067,6 +1261,7 @@
memory.GetProperties(&properties);
wgpu::SharedTextureMemoryBeginAccessDescriptor beginDesc = {};
+ beginDesc.concurrentRead = false;
beginDesc.initialized = true;
auto backendBeginState = GetParam().mBackend->ChainInitialBeginState(&beginDesc);
@@ -1115,6 +1310,7 @@
wgpu::CommandBuffer commandBuffer =
MakeFourColorsClearCommandBuffer(device, texture);
+ beginDesc.concurrentRead = false;
beginDesc.initialized = true;
memory.BeginAccess(texture, &beginDesc);
device.GetQueue().Submit(1, &commandBuffer);
@@ -1125,6 +1321,7 @@
beginDesc.fenceCount = endState.fenceCount;
beginDesc.fences = endState.fences;
beginDesc.signaledValues = endState.signaledValues;
+ beginDesc.concurrentRead = false;
beginDesc.initialized = false;
backendBeginState = GetParam().mBackend->ChainBeginState(&beginDesc, endState);
memory.BeginAccess(texture, &beginDesc);
@@ -1145,6 +1342,7 @@
beginDesc.fenceCount = endState.fenceCount;
beginDesc.fences = endState.fences;
beginDesc.signaledValues = endState.signaledValues;
+ beginDesc.concurrentRead = false;
beginDesc.initialized = endState.initialized;
backendBeginState = GetParam().mBackend->ChainBeginState(&beginDesc, endState);
@@ -1212,6 +1410,7 @@
auto backendEndState = GetParam().mBackend->ChainEndState(&endState);
{
wgpu::Texture texture = memory.CreateTexture();
+ beginDesc.concurrentRead = false;
beginDesc.initialized = false;
memory.BeginAccess(texture, &beginDesc);
@@ -1227,6 +1426,7 @@
wgpu::Texture texture = memory.CreateTexture();
beginDesc = {};
+ beginDesc.concurrentRead = false;
beginDesc.initialized = true;
backendBeginState = GetParam().mBackend->ChainBeginState(&beginDesc, endState);
memory.BeginAccess(texture, &beginDesc);
@@ -1258,6 +1458,7 @@
wgpu::Texture texture = memories[0].CreateTexture();
wgpu::SharedTextureMemoryBeginAccessDescriptor beginDesc = {};
+ beginDesc.concurrentRead = false;
beginDesc.initialized = false;
auto backendBeginState = GetParam().mBackend->ChainInitialBeginState(&beginDesc);
memories[0].BeginAccess(texture, &beginDesc);
@@ -1297,6 +1498,7 @@
beginDesc.fenceCount = endState.fenceCount;
beginDesc.fences = sharedFences.data();
beginDesc.signaledValues = endState.signaledValues;
+ beginDesc.concurrentRead = false;
beginDesc.initialized = endState.initialized;
backendBeginState = GetParam().mBackend->ChainBeginState(&beginDesc, endState);
@@ -1325,6 +1527,7 @@
wgpu::Texture texture = memories[0].CreateTexture();
wgpu::SharedTextureMemoryBeginAccessDescriptor beginDesc = {};
+ beginDesc.concurrentRead = false;
beginDesc.initialized = false;
auto backendBeginState = GetParam().mBackend->ChainInitialBeginState(&beginDesc);
memories[0].BeginAccess(texture, &beginDesc);
@@ -1346,6 +1549,7 @@
beginDesc.fenceCount = endState.fenceCount;
beginDesc.fences = sharedFences.data();
beginDesc.signaledValues = endState.signaledValues;
+ beginDesc.concurrentRead = false;
beginDesc.initialized = endState.initialized;
backendBeginState = GetParam().mBackend->ChainBeginState(&beginDesc, endState);
@@ -1380,6 +1584,7 @@
MakeCheckBySamplingCommandBuffer(devices[1], textures[1]);
wgpu::SharedTextureMemoryBeginAccessDescriptor beginDesc = {};
+ beginDesc.concurrentRead = false;
beginDesc.initialized = false;
auto backendBeginState = GetParam().mBackend->ChainInitialBeginState(&beginDesc);
memories[0].BeginAccess(textures[0], &beginDesc);
@@ -1397,6 +1602,7 @@
beginDesc.fenceCount = endState.fenceCount;
beginDesc.fences = sharedFences.data();
beginDesc.signaledValues = endState.signaledValues;
+ beginDesc.concurrentRead = false;
beginDesc.initialized = endState.initialized;
backendBeginState = GetParam().mBackend->ChainBeginState(&beginDesc, endState);
@@ -1427,6 +1633,7 @@
MakeCheckBySamplingCommandBuffer(devices[1], textures[1]);
wgpu::SharedTextureMemoryBeginAccessDescriptor beginDesc = {};
+ beginDesc.concurrentRead = false;
beginDesc.initialized = false;
auto backendBeginState = GetParam().mBackend->ChainInitialBeginState(&beginDesc);
memories[0].BeginAccess(textures[0], &beginDesc);
@@ -1447,6 +1654,7 @@
beginDesc.fenceCount = endState.fenceCount;
beginDesc.fences = sharedFences.data();
beginDesc.signaledValues = endState.signaledValues;
+ beginDesc.concurrentRead = false;
beginDesc.initialized = endState.initialized;
backendBeginState = GetParam().mBackend->ChainBeginState(&beginDesc, endState);
@@ -1475,6 +1683,7 @@
MakeCheckBySamplingCommandBuffer(devices[1], textures[1]);
wgpu::SharedTextureMemoryBeginAccessDescriptor beginDesc = {};
+ beginDesc.concurrentRead = false;
beginDesc.initialized = false;
auto backendBeginState = GetParam().mBackend->ChainInitialBeginState(&beginDesc);
@@ -1494,6 +1703,7 @@
beginDesc.fenceCount = endState.fenceCount;
beginDesc.fences = sharedFences.data();
beginDesc.signaledValues = endState.signaledValues;
+ beginDesc.concurrentRead = false;
beginDesc.initialized = endState.initialized;
backendBeginState = GetParam().mBackend->ChainBeginState(&beginDesc, endState);
@@ -1535,6 +1745,7 @@
MakeCheckBySamplingCommandBuffer(devices[1], textures[1]);
wgpu::SharedTextureMemoryBeginAccessDescriptor beginDesc = {};
+ beginDesc.concurrentRead = false;
beginDesc.initialized = false;
auto backendBeginState = GetParam().mBackend->ChainInitialBeginState(&beginDesc);
memories[0].BeginAccess(textures[0], &beginDesc);
@@ -1556,6 +1767,7 @@
beginDesc.fenceCount = endState.fenceCount;
beginDesc.fences = sharedFences.data();
beginDesc.signaledValues = endState.signaledValues;
+ beginDesc.concurrentRead = false;
beginDesc.initialized = endState.initialized;
backendBeginState = GetParam().mBackend->ChainBeginState(&beginDesc, endState);
@@ -1629,6 +1841,7 @@
// Begin access on texture 0
wgpu::SharedTextureMemoryBeginAccessDescriptor beginDesc = {};
+ beginDesc.concurrentRead = false;
beginDesc.initialized = false;
auto backendBeginState = GetParam().mBackend->ChainInitialBeginState(&beginDesc);
memories[0].BeginAccess(textures[0], &beginDesc);
@@ -1650,6 +1863,7 @@
beginDesc.fenceCount = sharedFences.size();
beginDesc.fences = sharedFences.data();
beginDesc.signaledValues = endState.signaledValues;
+ beginDesc.concurrentRead = false;
beginDesc.initialized = true;
backendBeginState = GetParam().mBackend->ChainBeginState(&beginDesc, endState);
memories[1].BeginAccess(textures[1], &beginDesc);
@@ -1695,6 +1909,7 @@
beginDesc.fenceCount = sharedFences.size();
beginDesc.fences = sharedFences.data();
beginDesc.signaledValues = signaledValues.data();
+ beginDesc.concurrentRead = false;
beginDesc.initialized = true;
backendBeginState = GetParam().mBackend->ChainBeginState(&beginDesc, endState2);
@@ -1706,6 +1921,142 @@
}
}
+// Test a shared texture memory created on one device. Create three textures from the memory.
+// Write to one texture, then read from two separate textures concurrently, then write again.
+// Reads should happen strictly after the writes. The final write should wait for the reads.
+TEST_P(SharedTextureMemoryTests, SameDeviceWriteThenConcurrentReadThenWrite) {
+ // TODO(dawn/2276): support concurrent read access.
+ DAWN_TEST_UNSUPPORTED_IF(IsD3D12() || IsVulkan());
+
+ DAWN_TEST_UNSUPPORTED_IF(!GetParam().mBackend->SupportsConcurrentRead());
+
+ for (const auto& memories :
+ GetParam().mBackend->CreatePerDeviceSharedTextureMemoriesFilterByUsage(
+ {device}, wgpu::TextureUsage::RenderAttachment | wgpu::TextureUsage::TextureBinding)) {
+ auto memory = memories[0];
+ wgpu::SharedTextureMemoryProperties properties;
+ memory.GetProperties(&properties);
+
+ wgpu::TextureDescriptor writeTextureDesc = {};
+ writeTextureDesc.format = properties.format;
+ writeTextureDesc.size = properties.size;
+ writeTextureDesc.usage =
+ wgpu::TextureUsage::RenderAttachment | wgpu::TextureUsage::TextureBinding;
+ writeTextureDesc.label = "write texture";
+
+ wgpu::TextureDescriptor readTextureDesc = {};
+ readTextureDesc.format = properties.format;
+ readTextureDesc.size = properties.size;
+ readTextureDesc.usage = wgpu::TextureUsage::TextureBinding;
+ readTextureDesc.label = "read texture";
+
+ // Create three textures from each memory.
+ // The first one will be written to.
+ // The second two will be concurrently read after the write.
+ // Then the first one will be written to again.
+ wgpu::Texture textures[] = {memory.CreateTexture(&writeTextureDesc),
+ memory.CreateTexture(&readTextureDesc),
+ memory.CreateTexture(&readTextureDesc)};
+
+ // Build command buffers for the test.
+ wgpu::CommandBuffer writeCommandBuffer0 =
+ MakeFourColorsClearCommandBuffer(device, textures[0]);
+
+ auto [checkCommandBuffer, colorTarget] =
+ MakeCheckBySamplingTwoTexturesCommandBuffer(textures[1], textures[2]);
+
+ wgpu::CommandBuffer clearToGrayCommandBuffer0;
+ {
+ wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+ utils::ComboRenderPassDescriptor passDescriptor({textures[0].CreateView()});
+ passDescriptor.cColorAttachments[0].storeOp = wgpu::StoreOp::Store;
+ passDescriptor.cColorAttachments[0].loadOp = wgpu::LoadOp::Clear;
+ passDescriptor.cColorAttachments[0].clearValue = {0.5, 0.5, 0.5, 1.0};
+
+ encoder.BeginRenderPass(&passDescriptor).End();
+ clearToGrayCommandBuffer0 = encoder.Finish();
+ }
+
+ // Begin access on texture 0
+ wgpu::SharedTextureMemoryBeginAccessDescriptor beginDesc = {};
+ beginDesc.concurrentRead = false;
+ beginDesc.initialized = false;
+ auto backendBeginState = GetParam().mBackend->ChainInitialBeginState(&beginDesc);
+ memory.BeginAccess(textures[0], &beginDesc);
+
+ // Write
+ device.GetQueue().Submit(1, &writeCommandBuffer0);
+
+ // End access on texture 0
+ wgpu::SharedTextureMemoryEndAccessState endState = {};
+ auto backendEndState = GetParam().mBackend->ChainEndState(&endState);
+ memory.EndAccess(textures[0], &endState);
+ EXPECT_TRUE(endState.initialized);
+
+ // Import fences to device and begin access.
+ std::vector<wgpu::SharedFence> sharedFences(endState.fenceCount);
+ for (size_t i = 0; i < endState.fenceCount; ++i) {
+ sharedFences[i] = GetParam().mBackend->ImportFenceTo(device, endState.fences[i]);
+ }
+ beginDesc.fenceCount = sharedFences.size();
+ beginDesc.fences = sharedFences.data();
+ beginDesc.signaledValues = endState.signaledValues;
+ beginDesc.concurrentRead = true;
+ beginDesc.initialized = true;
+ backendBeginState = GetParam().mBackend->ChainBeginState(&beginDesc, endState);
+ memory.BeginAccess(textures[1], &beginDesc);
+
+ // Import fences to device and begin access.
+ for (size_t i = 0; i < endState.fenceCount; ++i) {
+ sharedFences[i] = GetParam().mBackend->ImportFenceTo(device, endState.fences[i]);
+ }
+ memory.BeginAccess(textures[2], &beginDesc);
+
+ // Check contents
+ device.GetQueue().Submit(1, &checkCommandBuffer);
+ CheckFourColors(device, textures[1].GetFormat(), colorTarget);
+
+ // End access on texture 1
+ wgpu::SharedTextureMemoryEndAccessState endState1;
+ auto backendEndState1 = GetParam().mBackend->ChainEndState(&endState1);
+ memory.EndAccess(textures[1], &endState1);
+ EXPECT_TRUE(endState1.initialized);
+
+ // End access on texture 2
+ wgpu::SharedTextureMemoryEndAccessState endState2;
+ auto backendEndState2 = GetParam().mBackend->ChainEndState(&endState2);
+ memory.EndAccess(textures[2], &endState2);
+ EXPECT_TRUE(endState2.initialized);
+
+        // Import fences from both read accesses back to the device.
+ sharedFences.resize(endState1.fenceCount + endState2.fenceCount);
+ std::vector<uint64_t> signaledValues(sharedFences.size());
+
+ for (size_t i = 0; i < endState1.fenceCount; ++i) {
+ sharedFences[i] = GetParam().mBackend->ImportFenceTo(device, endState1.fences[i]);
+ signaledValues[i] = endState1.signaledValues[i];
+ }
+ for (size_t i = 0; i < endState2.fenceCount; ++i) {
+ sharedFences[i + endState1.fenceCount] =
+ GetParam().mBackend->ImportFenceTo(device, endState2.fences[i]);
+ signaledValues[i + endState1.fenceCount] = endState2.signaledValues[i];
+ }
+
+ beginDesc.fenceCount = sharedFences.size();
+ beginDesc.fences = sharedFences.data();
+ beginDesc.signaledValues = signaledValues.data();
+ beginDesc.concurrentRead = false;
+ beginDesc.initialized = true;
+ backendBeginState = GetParam().mBackend->ChainBeginState(&beginDesc, endState2);
+
+ // Begin access on texture 0
+ memory.BeginAccess(textures[0], &beginDesc);
+
+ // Submit a clear to gray.
+ device.GetQueue().Submit(1, &clearToGrayCommandBuffer0);
+ }
+}
+
// Test that textures created from SharedTextureMemory may perform sRGB reinterpretation.
TEST_P(SharedTextureMemoryTests, SRGBReinterpretation) {
// TODO(crbug.com/dawn/2304): Investigate if the VVL is wrong here.
@@ -1750,6 +2101,7 @@
wgpu::CommandBuffer commands = encoder.Finish();
wgpu::SharedTextureMemoryBeginAccessDescriptor beginDesc = {};
+ beginDesc.concurrentRead = false;
beginDesc.initialized = false;
auto backendBeginState = GetParam().mBackend->ChainInitialBeginState(&beginDesc);
@@ -1770,6 +2122,7 @@
beginDesc.fenceCount = endState.fenceCount;
beginDesc.fences = sharedFences.data();
beginDesc.signaledValues = endState.signaledValues;
+ beginDesc.concurrentRead = false;
beginDesc.initialized = endState.initialized;
backendBeginState = GetParam().mBackend->ChainBeginState(&beginDesc, endState);
@@ -1811,6 +2164,7 @@
MakeCheckBySamplingCommandBuffer(devices[1], texture1);
wgpu::SharedTextureMemoryBeginAccessDescriptor beginDesc = {};
+ beginDesc.concurrentRead = false;
beginDesc.initialized = false;
auto backendBeginState = GetParam().mBackend->ChainInitialBeginState(&beginDesc);
@@ -1830,6 +2184,7 @@
beginDesc.fenceCount = endState.fenceCount;
beginDesc.fences = sharedFences.data();
beginDesc.signaledValues = endState.signaledValues;
+ beginDesc.concurrentRead = false;
beginDesc.initialized = endState.initialized;
backendBeginState = GetParam().mBackend->ChainBeginState(&beginDesc, endState);
diff --git a/src/dawn/tests/white_box/SharedTextureMemoryTests.h b/src/dawn/tests/white_box/SharedTextureMemoryTests.h
index a05a5a1..80b40f3 100644
--- a/src/dawn/tests/white_box/SharedTextureMemoryTests.h
+++ b/src/dawn/tests/white_box/SharedTextureMemoryTests.h
@@ -175,6 +175,9 @@
std::pair<wgpu::CommandBuffer, wgpu::Texture> MakeCheckBySamplingCommandBuffer(
wgpu::Device& deviceObj,
wgpu::Texture& texture);
+ std::pair<wgpu::CommandBuffer, wgpu::Texture> MakeCheckBySamplingTwoTexturesCommandBuffer(
+ wgpu::Texture& texture0,
+ wgpu::Texture& texture1);
void CheckFourColors(wgpu::Device& deviceObj,
wgpu::TextureFormat format,
wgpu::Texture& colorTarget);