Fix ApiObjectList::Destroy() race
There is a race if ApiObjectList::Destroy() is going to destroy an
object and another thread drops the last reference to the object at the
same time. The other thread won't run DestroyImpl() but it will call
`delete this` and delete the object resulting in a use-after-free.
Keeping the ApiObjectList lock during DestroyImpl() calls won't work due
to https://crbug.com/42240929.
Have ApiObjectList::Destroy() acquire a reference to all of the
objects. RefCounted::TryAddRef() is added to expose the try-to-add-a-
reference functionality that WeakRefData was already using.
Bug: 396294899, 429112750
Change-Id: Ib0dcad57bccac87835c2c6d1cab3609cf0e53ee7
Reviewed-on: https://dawn-review.googlesource.com/c/dawn/+/251196
Commit-Queue: Kyle Charbonneau <kylechar@google.com>
Reviewed-by: Brandon Jones <bajones@chromium.org>
diff --git a/src/dawn/common/RefCounted.cpp b/src/dawn/common/RefCounted.cpp
index 4c57023..3e019bd 100644
--- a/src/dawn/common/RefCounted.cpp
+++ b/src/dawn/common/RefCounted.cpp
@@ -148,6 +148,10 @@
}
}
+bool RefCounted::TryAddRef() {
+ return mRefCount.TryIncrement();
+}
+
void RefCounted::ReleaseAndLockBeforeDestroy() {
if (mRefCount.Decrement()) {
LockAndDeleteThis();
diff --git a/src/dawn/common/RefCounted.h b/src/dawn/common/RefCounted.h
index 419a592..fa7e5f7 100644
--- a/src/dawn/common/RefCounted.h
+++ b/src/dawn/common/RefCounted.h
@@ -75,13 +75,14 @@
// synchronization in place for destruction.
void Release();
+ // Tries to add a reference. Returns false if the ref count is already at 0. This is used when
+ // operating on a raw pointer to a RefCounted instead of a valid Ref that may soon be deleted.
+ bool TryAddRef();
+
void APIAddRef() { AddRef(); }
void APIRelease() { Release(); }
protected:
- // Friend class is needed to access the RefCount to TryIncrement.
- friend class detail::WeakRefData;
-
virtual ~RefCounted();
void ReleaseAndLockBeforeDestroy();
diff --git a/src/dawn/common/WeakRefSupport.cpp b/src/dawn/common/WeakRefSupport.cpp
index 35f89bd..37fe823 100644
--- a/src/dawn/common/WeakRefSupport.cpp
+++ b/src/dawn/common/WeakRefSupport.cpp
@@ -35,7 +35,7 @@
Ref<RefCounted> WeakRefData::TryGetRef() {
std::lock_guard<std::mutex> lock(mMutex);
- if (!mValue || !mValue->mRefCount.TryIncrement()) {
+ if (!mValue || !mValue->TryAddRef()) {
return nullptr;
}
return AcquireRef(mValue.get());
diff --git a/src/dawn/native/ObjectBase.cpp b/src/dawn/native/ObjectBase.cpp
index 0555c7f..ba08289 100644
--- a/src/dawn/native/ObjectBase.cpp
+++ b/src/dawn/native/ObjectBase.cpp
@@ -78,18 +78,27 @@
mObjects.Use([&objectsToDestroy, this](auto lockedObjects) {
mMarkedDestroyed.store(true, std::memory_order_release);
- // Remove objects from ApiObjectList while holding the lock. The objects become untracked
- // so if another thread drops the last reference it won't also call DestroyImpl().
- while (!lockedObjects->empty()) {
- auto* head = lockedObjects->head();
- bool removed = head->RemoveFromList();
- DAWN_ASSERT(removed);
- objectsToDestroy.push_back(head->value());
+ // Try to acquire a reference to all objects to ensure they aren't deleted on another thread
+ // before DestroyImpl() runs below. If the object already has zero refs then another thread
+ // is about to delete it; the object is left in the list so it gets deleted+destroyed on the
+ // other thread.
+ for (auto* node = lockedObjects->head(); node != lockedObjects->end();) {
+ auto* next = node->next();
+
+ if (ApiObjectBase* object = node->value(); object->TryAddRef()) {
+ objectsToDestroy.push_back(object);
+ bool removed = node->RemoveFromList();
+ DAWN_ASSERT(removed);
+ }
+
+ node = next;
}
});
for (ApiObjectBase* object : objectsToDestroy) {
object->DestroyImpl();
+ // `object` may be deleted here if the last real reference was dropped on another thread.
+ object->Release();
}
}
diff --git a/src/dawn/tests/end2end/MultithreadTests.cpp b/src/dawn/tests/end2end/MultithreadTests.cpp
index bd23df0..1a56b40 100644
--- a/src/dawn/tests/end2end/MultithreadTests.cpp
+++ b/src/dawn/tests/end2end/MultithreadTests.cpp
@@ -654,6 +654,68 @@
});
}
+// Test that destroying Texture and associated TextureViews simultaneously on different threads
+// works. There was a data race here previously; see https://crbug.com/396294899 for more details.
+TEST_P(MultithreadTests, DestroyTextureAndViewsAtSameTime) {
+ constexpr uint32_t kNumViews = 10;
+ constexpr uint32_t kNumThreads = kNumViews + 1;
+ constexpr wgpu::TextureFormat kTextureFormat = wgpu::TextureFormat::R8Unorm;
+ constexpr wgpu::TextureUsage kTextureUsage = wgpu::TextureUsage::CopySrc |
+ wgpu::TextureUsage::CopyDst |
+ wgpu::TextureUsage::RenderAttachment;
+
+ for (uint32_t j = 0; j < 50; ++j) {
+ wgpu::TextureDescriptor texDescriptor = {};
+ texDescriptor.size = {1, 1, kNumViews};
+ texDescriptor.format = kTextureFormat;
+ texDescriptor.usage = kTextureUsage;
+ texDescriptor.mipLevelCount = 1;
+ texDescriptor.sampleCount = 1;
+
+ wgpu::Texture texture = device.CreateTexture(&texDescriptor);
+ wgpu::TextureView textureViews[kNumViews];
+
+ for (uint32_t i = 0; i < kNumViews; ++i) {
+ wgpu::TextureViewDescriptor viewDescriptor = {};
+ viewDescriptor.format = kTextureFormat;
+ viewDescriptor.usage = kTextureUsage;
+ viewDescriptor.mipLevelCount = 1;
+ viewDescriptor.baseArrayLayer = i;
+ viewDescriptor.arrayLayerCount = 1;
+ textureViews[i] = texture.CreateView(&viewDescriptor);
+ }
+
+ // Wait for all threads to be ready to destroy the Texture or TextureViews at the same time.
+ // TODO(kylechar): std::latch would be much simpler here but MSVC bots fail to run
+ // dawn_end2end_tests when it's used.
+ std::mutex mutex;
+ std::condition_variable cv;
+ uint32_t waiting = kNumThreads;
+
+ utils::RunInParallel(kNumThreads, [&](uint32_t id) {
+ bool notify = false;
+ {
+ std::lock_guard lock(mutex);
+ if (--waiting == 0) {
+ notify = true;
+ }
+ }
+ if (notify) {
+ cv.notify_all();
+ } else {
+ std::unique_lock lock(mutex);
+ cv.wait(lock, [&waiting]() { return waiting == 0; });
+ }
+
+ if (id == 0) {
+ texture.Destroy();
+ } else {
+ textureViews[id - 1] = {};
+ }
+ });
+ }
+}
+
class MultithreadCachingTests : public MultithreadTests {
protected:
wgpu::ShaderModule CreateComputeShaderModule() const {