Updates ContentLessObjectCache to use WeakRefs.
- Re-implements the cache using WeakRefs.
- Cleans up old Uncache* functions that are no longer needed.
- Removes "cached" state from CachedObject since it's no longer needed.
- Minor updates to related tests.
Bug: dawn:1769
Change-Id: I25cf5d3d5d3c9bcd793239db735304e74e9d6b76
Reviewed-on: https://dawn-review.googlesource.com/c/dawn/+/139283
Reviewed-by: Austin Eng <enga@chromium.org>
Kokoro: Kokoro <noreply+kokoro@google.com>
Reviewed-by: Corentin Wallez <cwallez@chromium.org>
Commit-Queue: Loko Kung <lokokung@google.com>
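For context, the following is a minimal sketch (illustrative only; MyObject and its members are
hypothetical, not part of this change) of the usage pattern the CL establishes: a cacheable type
extends both RefCounted and ContentLessObjectCacheable<T>, supplies HashFunc/EqualityFunc, and
uncaches itself in DeleteThis, mirroring the updated AttachmentState and the CacheableT test
helper below.

    #include "dawn/common/ContentLessObjectCache.h"
    #include "dawn/common/ContentLessObjectCacheable.h"
    #include "dawn/common/RefCounted.h"

    // Sketch only: a hypothetical cacheable type.
    class MyObject : public dawn::RefCounted,
                     public dawn::ContentLessObjectCacheable<MyObject> {
      public:
        explicit MyObject(size_t value) : mValue(value) {}

        struct HashFunc {
            size_t operator()(const MyObject* x) const { return x->mValue; }
        };
        struct EqualityFunc {
            bool operator()(const MyObject* l, const MyObject* r) const {
                return l->mValue == r->mValue;
            }
        };

      protected:
        // Cached objects must uncache themselves before destruction completes.
        void DeleteThis() override {
            Uncache();
            RefCounted::DeleteThis();
        }

      private:
        size_t mValue;
    };

    void Example(dawn::ContentLessObjectCache<MyObject>& cache) {
        dawn::Ref<MyObject> obj = dawn::AcquireRef(new MyObject(1));
        // Insert returns {Ref to the inserted-or-existing object, whether we inserted}.
        auto [ref, inserted] = cache.Insert(obj);

        MyObject blueprint(1);
        dawn::Ref<MyObject> found = cache.Find(&blueprint);  // nullptr if promotion fails.
    }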
diff --git a/src/dawn/common/BUILD.gn b/src/dawn/common/BUILD.gn
index 297565f..6b72bb5 100644
--- a/src/dawn/common/BUILD.gn
+++ b/src/dawn/common/BUILD.gn
@@ -238,6 +238,7 @@
"ConcurrentCache.h",
"Constants.h",
"ContentLessObjectCache.h",
+ "ContentLessObjectCacheable.h",
"CoreFoundationRef.h",
"DynamicLib.cpp",
"DynamicLib.h",
diff --git a/src/dawn/common/CMakeLists.txt b/src/dawn/common/CMakeLists.txt
index cb1b388..ca78909 100644
--- a/src/dawn/common/CMakeLists.txt
+++ b/src/dawn/common/CMakeLists.txt
@@ -41,6 +41,7 @@
"ConcurrentCache.h"
"Constants.h"
"ContentLessObjectCache.h"
+ "ContentLessObjectCacheable.h"
"CoreFoundationRef.h"
"DynamicLib.cpp"
"DynamicLib.h"
diff --git a/src/dawn/common/ContentLessObjectCache.h b/src/dawn/common/ContentLessObjectCache.h
index 551f796..6654777 100644
--- a/src/dawn/common/ContentLessObjectCache.h
+++ b/src/dawn/common/ContentLessObjectCache.h
@@ -20,68 +20,187 @@
#include <type_traits>
#include <unordered_set>
#include <utility>
+#include <variant>
+#include "dawn/common/ContentLessObjectCacheable.h"
#include "dawn/common/Ref.h"
#include "dawn/common/RefCounted.h"
+#include "dawn/common/WeakRef.h"
namespace dawn {
+namespace detail {
-// The ContentLessObjectCache stores raw pointers to living Refs without adding to their refcounts.
-// This means that any RefCountedT that is inserted into the cache needs to make sure that their
-// DeleteThis function erases itself from the cache. Otherwise, the cache can grow indefinitely via
-// leaked pointers to deleted Refs.
+// Tagged type that forces the special EqualityFunc path used by Erase. When erasing, we only
+// care about pointer equality, not value equality. This is also particularly important because
+// trying to promote on the Erase path can fail since the object's last ref could already have
+// been dropped.
+template <typename RefCountedT>
+struct ForErase {
+ explicit ForErase(RefCountedT* value) : mValue(value) {}
+ RefCountedT* mValue;
+};
+
+// All cached WeakRefs must have an immutable hash value computed at insertion. This ensures that
+// even if the last ref of the cached value is dropped, the entry still hashes to the same bucket
+// in the set so that it can be found and erased.
+template <typename RefCountedT>
+using WeakRefAndHash = std::pair<WeakRef<RefCountedT>, size_t>;
+
+// The cache always holds WeakRefs internally. However, to enable lookups via raw pointers and
+// the special Erase equality, the key is a variant type that we branch on.
+template <typename RefCountedT>
+using ContentLessObjectCacheKey =
+ std::variant<RefCountedT*, WeakRefAndHash<RefCountedT>, ForErase<RefCountedT>>;
+
+enum class KeyType : size_t { Pointer = 0, WeakRef = 1, ForErase = 2 };
+
+template <typename RefCountedT>
+struct ContentLessObjectCacheHashVisitor {
+ using BaseHashFunc = typename RefCountedT::HashFunc;
+
+ size_t operator()(const RefCountedT* ptr) const { return BaseHashFunc()(ptr); }
+ size_t operator()(const WeakRefAndHash<RefCountedT>& weakref) const { return weakref.second; }
+ size_t operator()(const ForErase<RefCountedT>& forErase) const {
+ return BaseHashFunc()(forErase.mValue);
+ }
+};
+
+template <typename RefCountedT>
+struct ContentLessObjectCacheKeyFuncs {
+ static_assert(
+ std::is_same_v<RefCountedT*,
+ std::variant_alternative_t<static_cast<size_t>(KeyType::Pointer),
+ ContentLessObjectCacheKey<RefCountedT>>>);
+ static_assert(
+ std::is_same_v<WeakRefAndHash<RefCountedT>,
+ std::variant_alternative_t<static_cast<size_t>(KeyType::WeakRef),
+ ContentLessObjectCacheKey<RefCountedT>>>);
+ static_assert(
+ std::is_same_v<ForErase<RefCountedT>,
+ std::variant_alternative_t<static_cast<size_t>(KeyType::ForErase),
+ ContentLessObjectCacheKey<RefCountedT>>>);
+
+ struct HashFunc {
+ size_t operator()(const ContentLessObjectCacheKey<RefCountedT>& key) const {
+ return std::visit(ContentLessObjectCacheHashVisitor<RefCountedT>(), key);
+ }
+ };
+
+ using BaseEqualityFunc = typename RefCountedT::EqualityFunc;
+ struct EqualityFunc {
+ bool operator()(const ContentLessObjectCacheKey<RefCountedT>& a,
+ const ContentLessObjectCacheKey<RefCountedT>& b) const {
+            // First check whether we are in the erasing scenario, since equality is handled
+            // differently there. Note that if either a or b is a ForErase, it is safe to use
+            // UnsafeGet for both a and b because either:
+            //   (1) a == b, which means we are destroying the last copy, and the pointer must
+            //       still be valid because cached objects uncache themselves before they are
+            //       completely destroyed, or
+            //   (2) a != b, in which case the lock on the cache guarantees that the element in
+            //       the cache has not been erased yet and hence cannot have been destroyed.
+ bool erasing = std::holds_alternative<ForErase<RefCountedT>>(a) ||
+ std::holds_alternative<ForErase<RefCountedT>>(b);
+
+ auto ExtractKey = [](bool erasing, const ContentLessObjectCacheKey<RefCountedT>& x)
+ -> std::pair<RefCountedT*, Ref<RefCountedT>> {
+ RefCountedT* xPtr = nullptr;
+ Ref<RefCountedT> xRef;
+ switch (static_cast<KeyType>(x.index())) {
+ case KeyType::Pointer:
+ xPtr = std::get<RefCountedT*>(x);
+ break;
+ case KeyType::WeakRef:
+ if (erasing) {
+ xPtr = std::get<WeakRefAndHash<RefCountedT>>(x).first.UnsafeGet();
+ } else {
+ xRef = std::get<WeakRefAndHash<RefCountedT>>(x).first.Promote();
+ xPtr = xRef.Get();
+ }
+ break;
+ case KeyType::ForErase:
+ xPtr = std::get<ForErase<RefCountedT>>(x).mValue;
+ break;
+ default:
+ UNREACHABLE();
+ }
+ return {xPtr, xRef};
+ };
+
+ auto [aPtr, aRef] = ExtractKey(erasing, a);
+ auto [bPtr, bRef] = ExtractKey(erasing, b);
+ if (aPtr == nullptr || bPtr == nullptr) {
+ return false;
+ }
+ if (erasing) {
+ return aPtr == bPtr;
+ }
+ return BaseEqualityFunc()(aPtr, bPtr);
+ }
+ };
+};
+
+} // namespace detail
+
template <typename RefCountedT>
class ContentLessObjectCache {
+ static_assert(std::is_base_of_v<detail::ContentLessObjectCacheableBase, RefCountedT>,
+ "Type must be cacheable to use with ContentLessObjectCache.");
+ static_assert(std::is_base_of_v<RefCounted, RefCountedT>,
+ "Type must be refcounted to use with ContentLessObjectCache.");
+
public:
- // The dtor asserts that the cache is empty to aid in finding pointer leaks that can be possible
- // if the RefCountedT doesn't correctly implement the DeleteThis function to erase itself from
- // the cache.
+    // The dtor asserts that the cache is empty to aid in finding pointer leaks, which are
+    // possible if the RefCountedT does not call Uncache from its DeleteThis override.
~ContentLessObjectCache() { ASSERT(Empty()); }
- // Inserts the object into the cache returning a pair where the first is a Ref to the inserted
- // or existing object, and the second is a bool that is true if we inserted `object` and false
- // otherwise.
- std::pair<Ref<RefCountedT>, bool> Insert(RefCountedT* object) {
+    // Inserts the object into the cache, returning a pair where the first element is a Ref to
+    // the inserted or existing object, and the second is a bool that is true if `obj` was
+    // inserted and false otherwise.
+ std::pair<Ref<RefCountedT>, bool> Insert(Ref<RefCountedT> obj) {
std::lock_guard<std::mutex> lock(mMutex);
- auto [it, inserted] = mCache.insert(object);
+ detail::WeakRefAndHash<RefCountedT> weakref =
+ std::make_pair(obj.GetWeakRef(), typename RefCountedT::HashFunc()(obj.Get()));
+ auto [it, inserted] = mCache.insert(weakref);
if (inserted) {
- return {object, inserted};
+ obj->mCache = this;
+ return {obj, inserted};
} else {
- // We need to check that the found instance isn't about to be destroyed. If it is, we
- // actually want to remove the old instance from the cache and insert this one. This can
- // happen if the last ref of the current instance in the cache hit is already in the
- // process of being removed but hasn't completed yet.
- Ref<RefCountedT> ref = TryGetRef(static_cast<RefCountedT*>(*it));
+            // Try to promote the found WeakRef to a Ref. If promotion fails, remove the stale
+            // entry and insert the new one.
+ Ref<RefCountedT> ref =
+ std::get<detail::WeakRefAndHash<RefCountedT>>(*it).first.Promote();
if (ref != nullptr) {
return {ref, false};
} else {
mCache.erase(it);
- auto result = mCache.insert(object);
+ auto result = mCache.insert(weakref);
ASSERT(result.second);
- return {object, true};
+ obj->mCache = this;
+ return {obj, true};
}
}
}
- // Returns a valid Ref<T> if the underlying RefCounted object's refcount has not reached 0.
- // Otherwise, returns nullptr.
+ // Returns a valid Ref<T> if we can Promote the underlying WeakRef. Returns nullptr otherwise.
Ref<RefCountedT> Find(RefCountedT* blueprint) {
std::lock_guard<std::mutex> lock(mMutex);
auto it = mCache.find(blueprint);
if (it != mCache.end()) {
- return TryGetRef(static_cast<RefCountedT*>(*it));
+ return std::get<detail::WeakRefAndHash<RefCountedT>>(*it).first.Promote();
}
return nullptr;
}
// Erases the object from the cache if an entry exists and is pointer-equal to it. Otherwise does
// not modify the cache.
- void Erase(RefCountedT* object) {
+ void Erase(RefCountedT* obj) {
std::lock_guard<std::mutex> lock(mMutex);
- auto it = mCache.find(object);
- if (*it == object) {
- mCache.erase(it);
+ auto it = mCache.find(detail::ForErase<RefCountedT>(obj));
+ if (it == mCache.end()) {
+ return;
}
+ obj->mCache = nullptr;
+ mCache.erase(it);
}
// Returns true iff the cache is empty.
@@ -92,9 +211,9 @@
private:
std::mutex mMutex;
- std::unordered_set<RefCountedT*,
- typename RefCountedT::HashFunc,
- typename RefCountedT::EqualityFunc>
+ std::unordered_set<detail::ContentLessObjectCacheKey<RefCountedT>,
+ typename detail::ContentLessObjectCacheKeyFuncs<RefCountedT>::HashFunc,
+ typename detail::ContentLessObjectCacheKeyFuncs<RefCountedT>::EqualityFunc>
mCache;
};
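The key design above relies on probing one unordered_set with three key shapes (raw pointer,
WeakRef-plus-hash, ForErase tag) through variant-aware hash and equality functors. The following
self-contained sketch (plain standard C++, not Dawn code; ByValue/ByPointer are hypothetical)
shows the same variant-key pattern in isolation:

    #include <cassert>
    #include <cstddef>
    #include <functional>
    #include <string>
    #include <unordered_set>
    #include <variant>

    struct ByValue { std::string s; };           // analogous to WeakRefAndHash
    struct ByPointer { const std::string* p; };  // analogous to a raw-pointer lookup

    using Key = std::variant<ByValue, ByPointer>;

    // Both functors "see through" the variant to the underlying string, so any
    // alternative can be used to probe the set.
    const std::string& View(const Key& k) {
        return std::holds_alternative<ByValue>(k) ? std::get<ByValue>(k).s
                                                  : *std::get<ByPointer>(k).p;
    }
    struct Hash {
        std::size_t operator()(const Key& k) const { return std::hash<std::string>()(View(k)); }
    };
    struct Eq {
        bool operator()(const Key& a, const Key& b) const { return View(a) == View(b); }
    };

    int main() {
        std::unordered_set<Key, Hash, Eq> set;
        set.insert(ByValue{"hello"});

        std::string probe = "hello";
        // Look up without constructing a ByValue copy, like Find does with a blueprint pointer.
        assert(set.find(Key{ByPointer{&probe}}) != set.end());
    }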
diff --git a/src/dawn/common/ContentLessObjectCacheable.h b/src/dawn/common/ContentLessObjectCacheable.h
new file mode 100644
index 0000000..eae50e9
--- /dev/null
+++ b/src/dawn/common/ContentLessObjectCacheable.h
@@ -0,0 +1,63 @@
+// Copyright 2023 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef SRC_DAWN_COMMON_CONTENTLESSOBJECTCACHEABLE_H_
+#define SRC_DAWN_COMMON_CONTENTLESSOBJECTCACHEABLE_H_
+
+#include "dawn/common/WeakRefSupport.h"
+
+namespace dawn {
+
+template <typename RefCountedT>
+class ContentLessObjectCache;
+
+namespace detail {
+
+// Placeholder base class for cacheable types to enable easier compile-time verification.
+class ContentLessObjectCacheableBase {};
+
+} // namespace detail
+
+// Classes should extend this type to become cacheable via the ContentLessObjectCache. The type
+// is also assumed to extend RefCounted in some way; otherwise, this helper class does not
+// work.
+template <typename RefCountedT>
+class ContentLessObjectCacheable : public detail::ContentLessObjectCacheableBase,
+ public WeakRefSupport<RefCountedT> {
+ public:
+    // Currently, any cacheable type should call Uncache in its DeleteThis override. This is
+    // important because otherwise entries for destroyed objects may leak in the internal set.
+ void Uncache() {
+ if (mCache != nullptr) {
+            // Note that Erase sets mCache to nullptr. Clearing it in Erase rather than here also
+            // covers the case where users call Erase directly before the Uncache call.
+ mCache->Erase(static_cast<RefCountedT*>(this));
+ }
+ }
+
+ protected:
+    // The dtor asserts that the cache isn't set, ensuring the object was uncached or never cached.
+ ~ContentLessObjectCacheable() override { ASSERT(mCache == nullptr); }
+
+ private:
+ friend class ContentLessObjectCache<RefCountedT>;
+
+ // Pointer to the owning cache if we were inserted at any point. This is set via the
+ // Insert/Erase functions on the cache.
+ ContentLessObjectCache<RefCountedT>* mCache = nullptr;
+};
+
+} // namespace dawn
+
+#endif // SRC_DAWN_COMMON_CONTENTLESSOBJECTCACHEABLE_H_
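To make the destruction ordering concrete, here is an illustrative interleaving (a sketch of the
intended protocol, not code from this CL) showing why Erase can rely on pointer equality and
UnsafeGet even though the object's last ref may already be gone:

    // Thread A (dropping the last ref)       Thread B (concurrent Find)
    // ---------------------------------      --------------------------
    // refcount reaches 0
    // DeleteThis() runs
    //   Uncache() -> cache.Erase(this)
    //     lock(mMutex)                        Find(blueprint) blocks on mMutex
    //     find(ForErase(this))  // pointer equality only; no Promote attempted
    //     mCache.erase(it)
    //     unlock                              lock acquired; the entry is gone, or
    //                                         its WeakRef fails to Promote, so
    //                                         Find returns nullptr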
diff --git a/src/dawn/common/RefCounted.h b/src/dawn/common/RefCounted.h
index 3424271..6971862 100644
--- a/src/dawn/common/RefCounted.h
+++ b/src/dawn/common/RefCounted.h
@@ -71,20 +71,6 @@
// synchronization in place for destruction.
void Release();
- // Tries to return a valid Ref to `object` if it's internal refcount is not already 0. If the
- // internal refcount has already reached 0, returns nullptr instead.
- // TODO(dawn:1769) Remove this once ContentLessObjectCache is converted to use WeakRefs.
- template <typename T, typename = typename std::is_convertible<T, RefCounted>>
- friend Ref<T> TryGetRef(T* object) {
- // Since this is called on the RefCounted class directly, and can race with destruction, we
- // verify that we can safely increment the refcount first, create the Ref, then decrement
- // the refcount in that order to ensure that the resultant Ref is a valid Ref.
- if (!object->mRefCount.TryIncrement()) {
- return nullptr;
- }
- return AcquireRef(object);
- }
-
void APIReference() { Reference(); }
// APIRelease() can be called without any synchronization guarantees so we need to use a Release
// method that will call LockAndDeleteThis() on destruction.
diff --git a/src/dawn/common/WeakRef.h b/src/dawn/common/WeakRef.h
index 4764924..26485f6 100644
--- a/src/dawn/common/WeakRef.h
+++ b/src/dawn/common/WeakRef.h
@@ -49,15 +49,29 @@
return *this;
}
+    // Implicit conversion from a WeakRefSupport<T>* is intentionally allowed.
+ // NOLINTNEXTLINE(runtime/explicit)
+ WeakRef(WeakRefSupport<T>* support) : mData(support->mData) {}
+
// Promotes a WeakRef to a Ref. Access to the raw pointer is not allowed because a raw pointer
// could become invalid after being retrieved.
- Ref<T> Promote() {
+ Ref<T> Promote() const {
if (mData != nullptr) {
return AcquireRef(static_cast<T*>(mData->TryGetRef().Detach()));
}
return nullptr;
}
+ // Returns the raw pointer to the RefCountedT if it has not been invalidated. Note that this
+ // function is not thread-safe since the returned pointer can become invalid after being
+ // retrieved.
+ T* UnsafeGet() const {
+ if (mData != nullptr) {
+ return static_cast<T*>(mData->UnsafeGet());
+ }
+ return nullptr;
+ }
+
private:
// Friend is needed so that we can access the data ref in conversions.
template <typename U>
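The distinction between the two accessors matters for the cache paths above. A minimal sketch
(Widget is hypothetical; it assumes a WeakRefSupport-enabled, refcounted type):

    class Widget : public dawn::RefCounted, public dawn::WeakRefSupport<Widget> {};

    void Demo() {
        dawn::Ref<Widget> strong = dawn::AcquireRef(new Widget());
        dawn::WeakRef<Widget> weak = strong.GetWeakRef();

        // Promote() is the safe accessor: it yields a real Ref, or nullptr once the
        // last strong ref is gone.
        if (dawn::Ref<Widget> ref = weak.Promote()) {
            // `ref` keeps the object alive while in scope.
        }

        // UnsafeGet() returns the raw pointer without taking a ref; the caller must
        // externally guarantee the object cannot be destroyed concurrently, as the
        // cache's mutex does on the Erase path.
        Widget* raw = weak.UnsafeGet();
        (void)raw;
    }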
diff --git a/src/dawn/common/WeakRefSupport.cpp b/src/dawn/common/WeakRefSupport.cpp
index 9273ff1..a85a9d7 100644
--- a/src/dawn/common/WeakRefSupport.cpp
+++ b/src/dawn/common/WeakRefSupport.cpp
@@ -28,6 +28,10 @@
return AcquireRef(mValue);
}
+RefCounted* WeakRefData::UnsafeGet() const {
+ return mValue;
+}
+
void WeakRefData::Invalidate() {
std::lock_guard<std::mutex> lock(mMutex);
mValue = nullptr;
diff --git a/src/dawn/common/WeakRefSupport.h b/src/dawn/common/WeakRefSupport.h
index 9702905..de37965 100644
--- a/src/dawn/common/WeakRefSupport.h
+++ b/src/dawn/common/WeakRefSupport.h
@@ -34,6 +34,10 @@
// internal refcount has already reached 0, returns nullptr instead.
Ref<RefCounted> TryGetRef();
+ // Returns the raw pointer to the RefCounted. In general, this is an unsafe operation because
+ // the RefCounted can become invalid after being retrieved.
+ RefCounted* UnsafeGet() const;
+
private:
std::mutex mMutex;
RefCounted* mValue = nullptr;
@@ -43,7 +47,7 @@
class WeakRefSupportBase {
protected:
explicit WeakRefSupportBase(Ref<detail::WeakRefData> data);
- ~WeakRefSupportBase();
+ virtual ~WeakRefSupportBase();
private:
template <typename T>
diff --git a/src/dawn/native/AttachmentState.cpp b/src/dawn/native/AttachmentState.cpp
index 1e7e370..9ece45d 100644
--- a/src/dawn/native/AttachmentState.cpp
+++ b/src/dawn/native/AttachmentState.cpp
@@ -122,10 +122,9 @@
SetContentHash(blueprint.GetContentHash());
}
-AttachmentState::~AttachmentState() {
- if (IsCachedReference()) {
- GetDevice()->UncacheAttachmentState(this);
- }
+void AttachmentState::DeleteThis() {
+ Uncache();
+ RefCounted::DeleteThis();
}
bool AttachmentState::EqualityFunc::operator()(const AttachmentState* a,
diff --git a/src/dawn/native/AttachmentState.h b/src/dawn/native/AttachmentState.h
index fcb0e43..c17faae 100644
--- a/src/dawn/native/AttachmentState.h
+++ b/src/dawn/native/AttachmentState.h
@@ -19,6 +19,7 @@
#include <bitset>
#include "dawn/common/Constants.h"
+#include "dawn/common/ContentLessObjectCacheable.h"
#include "dawn/common/ityp_array.h"
#include "dawn/common/ityp_bitset.h"
#include "dawn/native/CachedObject.h"
@@ -31,7 +32,9 @@
class DeviceBase;
-class AttachmentState final : public ObjectBase, public CachedObject {
+class AttachmentState final : public ObjectBase,
+ public CachedObject,
+ public ContentLessObjectCacheable<AttachmentState> {
public:
// Note: Descriptors must be validated before the AttachmentState is constructed.
explicit AttachmentState(DeviceBase* device, const RenderBundleEncoderDescriptor* descriptor);
@@ -41,8 +44,6 @@
// Constructor used to avoid re-parsing descriptors when we already parsed them for cache keys.
AttachmentState(const AttachmentState& blueprint);
- ~AttachmentState() override;
-
ityp::bitset<ColorAttachmentIndex, kMaxColorAttachments> GetColorAttachmentsMask() const;
wgpu::TextureFormat GetColorAttachmentFormat(ColorAttachmentIndex index) const;
bool HasDepthStencilAttachment() const;
@@ -55,6 +56,9 @@
};
size_t ComputeContentHash() override;
+ protected:
+ void DeleteThis() override;
+
private:
ityp::bitset<ColorAttachmentIndex, kMaxColorAttachments> mColorAttachmentsSet;
ityp::array<ColorAttachmentIndex, wgpu::TextureFormat, kMaxColorAttachments> mColorFormats;
diff --git a/src/dawn/native/BindGroupLayout.cpp b/src/dawn/native/BindGroupLayout.cpp
index 4324548..6bb8c5f 100644
--- a/src/dawn/native/BindGroupLayout.cpp
+++ b/src/dawn/native/BindGroupLayout.cpp
@@ -505,10 +505,7 @@
BindGroupLayoutBase::~BindGroupLayoutBase() = default;
void BindGroupLayoutBase::DestroyImpl() {
- if (IsCachedReference()) {
- // Do not uncache the actual cached object if we are a blueprint.
- GetDevice()->UncacheBindGroupLayout(this);
- }
+ Uncache();
}
// static
diff --git a/src/dawn/native/BindGroupLayout.h b/src/dawn/native/BindGroupLayout.h
index c2ffb97..77042b9 100644
--- a/src/dawn/native/BindGroupLayout.h
+++ b/src/dawn/native/BindGroupLayout.h
@@ -21,6 +21,7 @@
#include <string>
#include "dawn/common/Constants.h"
+#include "dawn/common/ContentLessObjectCacheable.h"
#include "dawn/common/SlabAllocator.h"
#include "dawn/common/ityp_span.h"
#include "dawn/common/ityp_vector.h"
@@ -49,7 +50,9 @@
// Bindings are specified as a |BindingNumber| in the BindGroupLayoutDescriptor.
// These numbers may be arbitrary and sparse. Internally, Dawn packs these numbers
// into a packed range of |BindingIndex| integers.
-class BindGroupLayoutBase : public ApiObjectBase, public CachedObject {
+class BindGroupLayoutBase : public ApiObjectBase,
+ public CachedObject,
+ public ContentLessObjectCacheable<BindGroupLayoutBase> {
public:
BindGroupLayoutBase(DeviceBase* device,
const BindGroupLayoutDescriptor* descriptor,
diff --git a/src/dawn/native/CachedObject.cpp b/src/dawn/native/CachedObject.cpp
index bf2be11..3460bc1 100644
--- a/src/dawn/native/CachedObject.cpp
+++ b/src/dawn/native/CachedObject.cpp
@@ -19,14 +19,6 @@
namespace dawn::native {
-bool CachedObject::IsCachedReference() const {
- return mIsCachedReference;
-}
-
-void CachedObject::SetIsCachedReference() {
- mIsCachedReference = true;
-}
-
size_t CachedObject::HashFunc::operator()(const CachedObject* obj) const {
return obj->GetContentHash();
}
diff --git a/src/dawn/native/CachedObject.h b/src/dawn/native/CachedObject.h
index 1040f54..5b7afec 100644
--- a/src/dawn/native/CachedObject.h
+++ b/src/dawn/native/CachedObject.h
@@ -24,13 +24,9 @@
namespace dawn::native {
// Some objects are cached so that instead of creating new duplicate objects, we increase the
-// refcount of an existing object. When an object is successfully created, the device should call
-// SetIsCachedReference() and insert the object into the cache.
+// refcount of an existing object.
class CachedObject {
public:
- bool IsCachedReference() const;
- void SetIsCachedReference();
-
// Functor necessary for the unordered_set<CachedObject*>-based cache.
struct HashFunc {
size_t operator()(const CachedObject* obj) const;
@@ -50,8 +46,6 @@
// Called by ObjectContentHasher upon creation to record the object.
virtual size_t ComputeContentHash() = 0;
- bool mIsCachedReference = false;
-
size_t mContentHash = 0;
bool mIsContentHashInitialized = false;
};
diff --git a/src/dawn/native/ComputePipeline.cpp b/src/dawn/native/ComputePipeline.cpp
index 99364e7..9916a0a 100644
--- a/src/dawn/native/ComputePipeline.cpp
+++ b/src/dawn/native/ComputePipeline.cpp
@@ -64,10 +64,7 @@
ComputePipelineBase::~ComputePipelineBase() = default;
void ComputePipelineBase::DestroyImpl() {
- if (IsCachedReference()) {
- // Do not uncache the actual cached object if we are a blueprint.
- GetDevice()->UncacheComputePipeline(this);
- }
+ Uncache();
}
// static
diff --git a/src/dawn/native/ComputePipeline.h b/src/dawn/native/ComputePipeline.h
index 85794ec..47662ba 100644
--- a/src/dawn/native/ComputePipeline.h
+++ b/src/dawn/native/ComputePipeline.h
@@ -15,6 +15,7 @@
#ifndef SRC_DAWN_NATIVE_COMPUTEPIPELINE_H_
#define SRC_DAWN_NATIVE_COMPUTEPIPELINE_H_
+#include "dawn/common/ContentLessObjectCacheable.h"
#include "dawn/common/NonCopyable.h"
#include "dawn/native/Forward.h"
#include "dawn/native/Pipeline.h"
@@ -27,7 +28,8 @@
MaybeError ValidateComputePipelineDescriptor(DeviceBase* device,
const ComputePipelineDescriptor* descriptor);
-class ComputePipelineBase : public PipelineBase {
+class ComputePipelineBase : public PipelineBase,
+ public ContentLessObjectCacheable<ComputePipelineBase> {
public:
ComputePipelineBase(DeviceBase* device, const ComputePipelineDescriptor* descriptor);
~ComputePipelineBase() override;
diff --git a/src/dawn/native/Device.cpp b/src/dawn/native/Device.cpp
index 2a77e66..72e81e6 100644
--- a/src/dawn/native/Device.cpp
+++ b/src/dawn/native/Device.cpp
@@ -103,9 +103,6 @@
bool inserted = false;
std::tie(result, inserted) = cache.Insert(result.Get());
- if (inserted) {
- result->SetIsCachedReference();
- }
return ReturnType(result);
}
@@ -856,11 +853,6 @@
});
}
-void DeviceBase::UncacheBindGroupLayout(BindGroupLayoutBase* obj) {
- ASSERT(obj->IsCachedReference());
- mCaches->bindGroupLayouts.Erase(obj);
-}
-
// Private function used at initialization
ResultOrError<Ref<BindGroupLayoutBase>> DeviceBase::CreateEmptyBindGroupLayout() {
BindGroupLayoutDescriptor desc = {};
@@ -901,30 +893,15 @@
Ref<ComputePipelineBase> DeviceBase::AddOrGetCachedComputePipeline(
Ref<ComputePipelineBase> computePipeline) {
ASSERT(IsLockedByCurrentThreadIfNeeded());
- auto [cachedPipeline, inserted] = mCaches->computePipelines.Insert(computePipeline.Get());
- if (inserted) {
- computePipeline->SetIsCachedReference();
- return computePipeline;
- } else {
- return std::move(cachedPipeline);
- }
+ auto [pipeline, _] = mCaches->computePipelines.Insert(computePipeline.Get());
+ return std::move(pipeline);
}
Ref<RenderPipelineBase> DeviceBase::AddOrGetCachedRenderPipeline(
Ref<RenderPipelineBase> renderPipeline) {
ASSERT(IsLockedByCurrentThreadIfNeeded());
- auto [cachedPipeline, inserted] = mCaches->renderPipelines.Insert(renderPipeline.Get());
- if (inserted) {
- renderPipeline->SetIsCachedReference();
- return renderPipeline;
- } else {
- return std::move(cachedPipeline);
- }
-}
-
-void DeviceBase::UncacheComputePipeline(ComputePipelineBase* obj) {
- ASSERT(obj->IsCachedReference());
- mCaches->computePipelines.Erase(obj);
+ auto [pipeline, _] = mCaches->renderPipelines.Insert(renderPipeline.Get());
+ return std::move(pipeline);
}
ResultOrError<Ref<TextureViewBase>> DeviceBase::CreateImplicitMSAARenderTextureViewFor(
@@ -997,16 +974,6 @@
});
}
-void DeviceBase::UncachePipelineLayout(PipelineLayoutBase* obj) {
- ASSERT(obj->IsCachedReference());
- mCaches->pipelineLayouts.Erase(obj);
-}
-
-void DeviceBase::UncacheRenderPipeline(RenderPipelineBase* obj) {
- ASSERT(obj->IsCachedReference());
- mCaches->renderPipelines.Erase(obj);
-}
-
ResultOrError<Ref<SamplerBase>> DeviceBase::GetOrCreateSampler(
const SamplerDescriptor* descriptor) {
SamplerBase blueprint(this, descriptor, ApiObjectBase::kUntrackedByDevice);
@@ -1022,11 +989,6 @@
});
}
-void DeviceBase::UncacheSampler(SamplerBase* obj) {
- ASSERT(obj->IsCachedReference());
- mCaches->samplers.Erase(obj);
-}
-
ResultOrError<Ref<ShaderModuleBase>> DeviceBase::GetOrCreateShaderModule(
const ShaderModuleDescriptor* descriptor,
ShaderModuleParseResult* parseResult,
@@ -1063,15 +1025,9 @@
});
}
-void DeviceBase::UncacheShaderModule(ShaderModuleBase* obj) {
- ASSERT(obj->IsCachedReference());
- mCaches->shaderModules.Erase(obj);
-}
-
Ref<AttachmentState> DeviceBase::GetOrCreateAttachmentState(AttachmentState* blueprint) {
return GetOrCreate(mCaches->attachmentStates, blueprint, [&]() -> Ref<AttachmentState> {
- Ref<AttachmentState> attachmentState = AcquireRef(new AttachmentState(*blueprint));
- return attachmentState;
+ return AcquireRef(new AttachmentState(*blueprint));
});
}
@@ -1093,11 +1049,6 @@
return GetOrCreateAttachmentState(&blueprint);
}
-void DeviceBase::UncacheAttachmentState(AttachmentState* obj) {
- ASSERT(obj->IsCachedReference());
- mCaches->attachmentStates.Erase(obj);
-}
-
Ref<PipelineCacheBase> DeviceBase::GetOrCreatePipelineCache(const CacheKey& key) {
return GetOrCreatePipelineCacheImpl(key);
}
diff --git a/src/dawn/native/Device.h b/src/dawn/native/Device.h
index 0154474..03ee648 100644
--- a/src/dawn/native/Device.h
+++ b/src/dawn/native/Device.h
@@ -195,13 +195,10 @@
ResultOrError<Ref<BindGroupLayoutBase>> GetOrCreateBindGroupLayout(
const BindGroupLayoutDescriptor* descriptor,
PipelineCompatibilityToken pipelineCompatibilityToken = PipelineCompatibilityToken(0));
- void UncacheBindGroupLayout(BindGroupLayoutBase* obj);
BindGroupLayoutBase* GetEmptyBindGroupLayout();
PipelineLayoutBase* GetEmptyPipelineLayout();
- void UncacheComputePipeline(ComputePipelineBase* obj);
-
ResultOrError<Ref<TextureViewBase>> CreateImplicitMSAARenderTextureViewFor(
const TextureBase* singleSampledTexture,
uint32_t sampleCount);
@@ -210,25 +207,19 @@
ResultOrError<Ref<PipelineLayoutBase>> GetOrCreatePipelineLayout(
const PipelineLayoutDescriptor* descriptor);
- void UncachePipelineLayout(PipelineLayoutBase* obj);
-
- void UncacheRenderPipeline(RenderPipelineBase* obj);
ResultOrError<Ref<SamplerBase>> GetOrCreateSampler(const SamplerDescriptor* descriptor);
- void UncacheSampler(SamplerBase* obj);
ResultOrError<Ref<ShaderModuleBase>> GetOrCreateShaderModule(
const ShaderModuleDescriptor* descriptor,
ShaderModuleParseResult* parseResult,
OwnedCompilationMessages* compilationMessages);
- void UncacheShaderModule(ShaderModuleBase* obj);
Ref<AttachmentState> GetOrCreateAttachmentState(AttachmentState* blueprint);
Ref<AttachmentState> GetOrCreateAttachmentState(
const RenderBundleEncoderDescriptor* descriptor);
Ref<AttachmentState> GetOrCreateAttachmentState(const RenderPipelineDescriptor* descriptor);
Ref<AttachmentState> GetOrCreateAttachmentState(const RenderPassDescriptor* descriptor);
- void UncacheAttachmentState(AttachmentState* obj);
Ref<PipelineCacheBase> GetOrCreatePipelineCache(const CacheKey& key);
diff --git a/src/dawn/native/PipelineLayout.cpp b/src/dawn/native/PipelineLayout.cpp
index c11a86b..839efe9 100644
--- a/src/dawn/native/PipelineLayout.cpp
+++ b/src/dawn/native/PipelineLayout.cpp
@@ -84,10 +84,7 @@
PipelineLayoutBase::~PipelineLayoutBase() = default;
void PipelineLayoutBase::DestroyImpl() {
- if (IsCachedReference()) {
- // Do not uncache the actual cached object if we are a blueprint.
- GetDevice()->UncachePipelineLayout(this);
- }
+ Uncache();
}
// static
diff --git a/src/dawn/native/PipelineLayout.h b/src/dawn/native/PipelineLayout.h
index cd23026..6f89786 100644
--- a/src/dawn/native/PipelineLayout.h
+++ b/src/dawn/native/PipelineLayout.h
@@ -21,6 +21,7 @@
#include <vector>
#include "dawn/common/Constants.h"
+#include "dawn/common/ContentLessObjectCacheable.h"
#include "dawn/common/ityp_array.h"
#include "dawn/common/ityp_bitset.h"
#include "dawn/native/BindingInfo.h"
@@ -49,7 +50,9 @@
ConstantEntry const* constants = nullptr;
};
-class PipelineLayoutBase : public ApiObjectBase, public CachedObject {
+class PipelineLayoutBase : public ApiObjectBase,
+ public CachedObject,
+ public ContentLessObjectCacheable<PipelineLayoutBase> {
public:
PipelineLayoutBase(DeviceBase* device,
const PipelineLayoutDescriptor* descriptor,
diff --git a/src/dawn/native/RenderPipeline.cpp b/src/dawn/native/RenderPipeline.cpp
index 33dd20d..e8c7e4f 100644
--- a/src/dawn/native/RenderPipeline.cpp
+++ b/src/dawn/native/RenderPipeline.cpp
@@ -807,10 +807,7 @@
RenderPipelineBase::~RenderPipelineBase() = default;
void RenderPipelineBase::DestroyImpl() {
- if (IsCachedReference()) {
- // Do not uncache the actual cached object if we are a blueprint.
- GetDevice()->UncacheRenderPipeline(this);
- }
+ Uncache();
// Remove reference to the attachment state so that we don't have lingering references to
// it preventing it from being uncached in the device.
diff --git a/src/dawn/native/RenderPipeline.h b/src/dawn/native/RenderPipeline.h
index ad5593a..0408a98 100644
--- a/src/dawn/native/RenderPipeline.h
+++ b/src/dawn/native/RenderPipeline.h
@@ -19,6 +19,7 @@
#include <bitset>
#include <vector>
+#include "dawn/common/ContentLessObjectCacheable.h"
#include "dawn/common/TypedInteger.h"
#include "dawn/native/AttachmentState.h"
#include "dawn/native/Forward.h"
@@ -60,7 +61,8 @@
uint64_t lastStride;
};
-class RenderPipelineBase : public PipelineBase {
+class RenderPipelineBase : public PipelineBase,
+ public ContentLessObjectCacheable<RenderPipelineBase> {
public:
RenderPipelineBase(DeviceBase* device, const RenderPipelineDescriptor* descriptor);
~RenderPipelineBase() override;
diff --git a/src/dawn/native/Sampler.cpp b/src/dawn/native/Sampler.cpp
index 4e44d73..b45255a 100644
--- a/src/dawn/native/Sampler.cpp
+++ b/src/dawn/native/Sampler.cpp
@@ -95,10 +95,7 @@
SamplerBase::~SamplerBase() = default;
void SamplerBase::DestroyImpl() {
- if (IsCachedReference()) {
- // Do not uncache the actual cached object if we are a blueprint.
- GetDevice()->UncacheSampler(this);
- }
+ Uncache();
}
// static
diff --git a/src/dawn/native/Sampler.h b/src/dawn/native/Sampler.h
index 93e6b58..08c8bf8 100644
--- a/src/dawn/native/Sampler.h
+++ b/src/dawn/native/Sampler.h
@@ -15,6 +15,7 @@
#ifndef SRC_DAWN_NATIVE_SAMPLER_H_
#define SRC_DAWN_NATIVE_SAMPLER_H_
+#include "dawn/common/ContentLessObjectCacheable.h"
#include "dawn/native/CachedObject.h"
#include "dawn/native/Error.h"
#include "dawn/native/Forward.h"
@@ -28,7 +29,9 @@
MaybeError ValidateSamplerDescriptor(DeviceBase* device, const SamplerDescriptor* descriptor);
-class SamplerBase : public ApiObjectBase, public CachedObject {
+class SamplerBase : public ApiObjectBase,
+ public CachedObject,
+ public ContentLessObjectCacheable<SamplerBase> {
public:
SamplerBase(DeviceBase* device,
const SamplerDescriptor* descriptor,
diff --git a/src/dawn/native/ShaderModule.cpp b/src/dawn/native/ShaderModule.cpp
index ece5cd9..fb9279b 100644
--- a/src/dawn/native/ShaderModule.cpp
+++ b/src/dawn/native/ShaderModule.cpp
@@ -1124,10 +1124,7 @@
ShaderModuleBase::~ShaderModuleBase() = default;
void ShaderModuleBase::DestroyImpl() {
- if (IsCachedReference()) {
- // Do not uncache the actual cached object if we are a blueprint.
- GetDevice()->UncacheShaderModule(this);
- }
+ Uncache();
}
// static
diff --git a/src/dawn/native/ShaderModule.h b/src/dawn/native/ShaderModule.h
index da58ad9..16be99a 100644
--- a/src/dawn/native/ShaderModule.h
+++ b/src/dawn/native/ShaderModule.h
@@ -25,6 +25,7 @@
#include <vector>
#include "dawn/common/Constants.h"
+#include "dawn/common/ContentLessObjectCacheable.h"
#include "dawn/common/ityp_array.h"
#include "dawn/native/BindingInfo.h"
#include "dawn/native/CachedObject.h"
@@ -253,7 +254,9 @@
bool usesSampleMaskOutput = false;
};
-class ShaderModuleBase : public ApiObjectBase, public CachedObject {
+class ShaderModuleBase : public ApiObjectBase,
+ public CachedObject,
+ public ContentLessObjectCacheable<ShaderModuleBase> {
public:
ShaderModuleBase(DeviceBase* device,
const ShaderModuleDescriptor* descriptor,
diff --git a/src/dawn/tests/unittests/ContentLessObjectCacheTests.cpp b/src/dawn/tests/unittests/ContentLessObjectCacheTests.cpp
index 4ad91c6..7ecacd5 100644
--- a/src/dawn/tests/unittests/ContentLessObjectCacheTests.cpp
+++ b/src/dawn/tests/unittests/ContentLessObjectCacheTests.cpp
@@ -28,75 +28,72 @@
using utils::BinarySemaphore;
-class RefCountedT : public RefCounted {
+class CacheableT : public RefCounted, public ContentLessObjectCacheable<CacheableT> {
public:
- explicit RefCountedT(size_t value) : mValue(value) {}
- RefCountedT(size_t value, std::function<void(RefCountedT*)> deleteFn)
+ explicit CacheableT(size_t value) : mValue(value) {}
+ CacheableT(size_t value, std::function<void(CacheableT*)> deleteFn)
: mValue(value), mDeleteFn(deleteFn) {}
- ~RefCountedT() override { mDeleteFn(this); }
+ ~CacheableT() override { mDeleteFn(this); }
struct HashFunc {
- size_t operator()(const RefCountedT* x) const { return x->mValue; }
+ size_t operator()(const CacheableT* x) const { return x->mValue; }
};
struct EqualityFunc {
- bool operator()(const RefCountedT* l, const RefCountedT* r) const {
+ bool operator()(const CacheableT* l, const CacheableT* r) const {
return l->mValue == r->mValue;
}
};
private:
size_t mValue;
- std::function<void(RefCountedT*)> mDeleteFn = [](RefCountedT*) -> void {};
+ std::function<void(CacheableT*)> mDeleteFn = [](CacheableT*) -> void {};
};
// Empty cache returns true on Empty().
TEST(ContentLessObjectCacheTest, Empty) {
- ContentLessObjectCache<RefCountedT> cache;
+ ContentLessObjectCache<CacheableT> cache;
EXPECT_TRUE(cache.Empty());
}
// Non-empty cache returns false on Empty().
TEST(ContentLessObjectCacheTest, NonEmpty) {
- ContentLessObjectCache<RefCountedT> cache;
- Ref<RefCountedT> object =
- AcquireRef(new RefCountedT(1, [&](RefCountedT* x) { cache.Erase(x); }));
- EXPECT_TRUE(cache.Insert(object.Get()).second);
+ ContentLessObjectCache<CacheableT> cache;
+ Ref<CacheableT> object = AcquireRef(new CacheableT(1, [&](CacheableT* x) { cache.Erase(x); }));
+ EXPECT_TRUE(cache.Insert(object).second);
EXPECT_FALSE(cache.Empty());
}
// Objects inserted into the cache are findable.
TEST(ContentLessObjectCacheTest, Insert) {
- ContentLessObjectCache<RefCountedT> cache;
- Ref<RefCountedT> object =
- AcquireRef(new RefCountedT(1, [&](RefCountedT* x) { cache.Erase(x); }));
+ ContentLessObjectCache<CacheableT> cache;
+ Ref<CacheableT> object = AcquireRef(new CacheableT(1, [&](CacheableT* x) { cache.Erase(x); }));
EXPECT_TRUE(cache.Insert(object.Get()).second);
- RefCountedT blueprint(1);
- Ref<RefCountedT> cached = cache.Find(&blueprint);
+ CacheableT blueprint(1);
+ Ref<CacheableT> cached = cache.Find(&blueprint);
EXPECT_TRUE(object.Get() == cached.Get());
}
// Duplicate insert calls on different objects with the same hash only insert the first.
TEST(ContentLessObjectCacheTest, InsertDuplicate) {
- ContentLessObjectCache<RefCountedT> cache;
- Ref<RefCountedT> object1 =
- AcquireRef(new RefCountedT(1, [&](RefCountedT* x) { cache.Erase(x); }));
+ ContentLessObjectCache<CacheableT> cache;
+ Ref<CacheableT> object1 = AcquireRef(new CacheableT(1, [&](CacheableT* x) { cache.Erase(x); }));
EXPECT_TRUE(cache.Insert(object1.Get()).second);
- Ref<RefCountedT> object2 = AcquireRef(new RefCountedT(1));
+ Ref<CacheableT> object2 = AcquireRef(new CacheableT(1));
EXPECT_FALSE(cache.Insert(object2.Get()).second);
- RefCountedT blueprint(1);
- Ref<RefCountedT> cached = cache.Find(&blueprint);
+ CacheableT blueprint(1);
+ Ref<CacheableT> cached = cache.Find(&blueprint);
EXPECT_TRUE(object1.Get() == cached.Get());
}
// Erasing the only entry leaves the cache empty.
TEST(ContentLessObjectCacheTest, Erase) {
- ContentLessObjectCache<RefCountedT> cache;
- Ref<RefCountedT> object = AcquireRef(new RefCountedT(1));
+ ContentLessObjectCache<CacheableT> cache;
+ Ref<CacheableT> object = AcquireRef(new CacheableT(1));
EXPECT_TRUE(cache.Insert(object.Get()).second);
EXPECT_FALSE(cache.Empty());
@@ -106,13 +103,12 @@
// Erasing a hash equivalent but not pointer equivalent entry is a no-op.
TEST(ContentLessObjectCacheTest, EraseDuplicate) {
- ContentLessObjectCache<RefCountedT> cache;
- Ref<RefCountedT> object1 =
- AcquireRef(new RefCountedT(1, [&](RefCountedT* x) { cache.Erase(x); }));
+ ContentLessObjectCache<CacheableT> cache;
+ Ref<CacheableT> object1 = AcquireRef(new CacheableT(1, [&](CacheableT* x) { cache.Erase(x); }));
EXPECT_TRUE(cache.Insert(object1.Get()).second);
EXPECT_FALSE(cache.Empty());
- Ref<RefCountedT> object2 = AcquireRef(new RefCountedT(1));
+ Ref<CacheableT> object2 = AcquireRef(new CacheableT(1));
cache.Erase(object2.Get());
EXPECT_FALSE(cache.Empty());
}
@@ -121,21 +117,21 @@
TEST(ContentLessObjectCacheTest, InsertingAndFinding) {
constexpr size_t kNumObjects = 100;
constexpr size_t kNumThreads = 8;
- ContentLessObjectCache<RefCountedT> cache;
- std::vector<Ref<RefCountedT>> objects(kNumObjects);
+ ContentLessObjectCache<CacheableT> cache;
+ std::vector<Ref<CacheableT>> objects(kNumObjects);
auto f = [&] {
for (size_t i = 0; i < kNumObjects; i++) {
- Ref<RefCountedT> object =
- AcquireRef(new RefCountedT(i, [&](RefCountedT* x) { cache.Erase(x); }));
+ Ref<CacheableT> object =
+ AcquireRef(new CacheableT(i, [&](CacheableT* x) { cache.Erase(x); }));
if (cache.Insert(object.Get()).second) {
// This shouldn't race because exactly 1 thread should successfully insert.
objects[i] = object;
}
}
for (size_t i = 0; i < kNumObjects; i++) {
- RefCountedT blueprint(i);
- Ref<RefCountedT> cached = cache.Find(&blueprint);
+ CacheableT blueprint(i);
+ Ref<CacheableT> cached = cache.Find(&blueprint);
EXPECT_NE(cached.Get(), nullptr);
EXPECT_EQ(cached.Get(), objects[i].Get());
}
@@ -154,8 +150,8 @@
TEST(ContentLessObjectCacheTest, FindDeleting) {
BinarySemaphore semA, semB;
- ContentLessObjectCache<RefCountedT> cache;
- Ref<RefCountedT> object = AcquireRef(new RefCountedT(1, [&](RefCountedT* x) {
+ ContentLessObjectCache<CacheableT> cache;
+ Ref<CacheableT> object = AcquireRef(new CacheableT(1, [&](CacheableT* x) {
semA.Release();
semB.Acquire();
cache.Erase(x);
@@ -167,7 +163,7 @@
// Thread B will try to Find the entry before it is completely destroyed.
auto threadB = [&] {
semA.Acquire();
- RefCountedT blueprint(1);
+ CacheableT blueprint(1);
EXPECT_TRUE(cache.Find(&blueprint) == nullptr);
semB.Release();
};
@@ -183,16 +179,15 @@
TEST(ContentLessObjectCacheTest, InsertDeleting) {
BinarySemaphore semA, semB;
- ContentLessObjectCache<RefCountedT> cache;
- Ref<RefCountedT> object1 = AcquireRef(new RefCountedT(1, [&](RefCountedT* x) {
+ ContentLessObjectCache<CacheableT> cache;
+ Ref<CacheableT> object1 = AcquireRef(new CacheableT(1, [&](CacheableT* x) {
semA.Release();
semB.Acquire();
cache.Erase(x);
}));
EXPECT_TRUE(cache.Insert(object1.Get()).second);
- Ref<RefCountedT> object2 =
- AcquireRef(new RefCountedT(1, [&](RefCountedT* x) { cache.Erase(x); }));
+ Ref<CacheableT> object2 = AcquireRef(new CacheableT(1, [&](CacheableT* x) { cache.Erase(x); }));
// Thread A will release the last reference of the original object.
auto threadA = [&] { object1 = nullptr; };
@@ -209,8 +204,8 @@
tA.join();
tB.join();
- RefCountedT blueprint(1);
- Ref<RefCountedT> cached = cache.Find(&blueprint);
+ CacheableT blueprint(1);
+ Ref<CacheableT> cached = cache.Find(&blueprint);
EXPECT_TRUE(object2.Get() == cached.Get());
}
diff --git a/src/dawn/tests/unittests/native/DestroyObjectTests.cpp b/src/dawn/tests/unittests/native/DestroyObjectTests.cpp
index 74d8054..3bc14e5 100644
--- a/src/dawn/tests/unittests/native/DestroyObjectTests.cpp
+++ b/src/dawn/tests/unittests/native/DestroyObjectTests.cpp
@@ -162,7 +162,6 @@
wgpu::BindGroupLayout bindGroupLayout = device.CreateBindGroupLayout(ToCppAPI(&desc));
EXPECT_TRUE(FromAPI(bindGroupLayout.Get())->IsAlive());
- EXPECT_TRUE(FromAPI(bindGroupLayout.Get())->IsCachedReference());
}
}
@@ -343,7 +342,6 @@
wgpu::ComputePipeline computePipeline = device.CreateComputePipeline(ToCppAPI(&desc));
EXPECT_TRUE(FromAPI(computePipeline.Get())->IsAlive());
- EXPECT_TRUE(FromAPI(computePipeline.Get())->IsCachedReference());
}
}
@@ -482,7 +480,6 @@
wgpu::PipelineLayout pipelineLayout = device.CreatePipelineLayout(ToCppAPI(&desc));
EXPECT_TRUE(FromAPI(pipelineLayout.Get())->IsAlive());
- EXPECT_TRUE(FromAPI(pipelineLayout.Get())->IsCachedReference());
}
}
@@ -572,7 +569,6 @@
wgpu::RenderPipeline renderPipeline = device.CreateRenderPipeline(ToCppAPI(&desc));
EXPECT_TRUE(FromAPI(renderPipeline.Get())->IsAlive());
- EXPECT_TRUE(FromAPI(renderPipeline.Get())->IsCachedReference());
}
}
@@ -602,7 +598,6 @@
wgpu::Sampler sampler = device.CreateSampler(ToCppAPI(&desc));
EXPECT_TRUE(FromAPI(sampler.Get())->IsAlive());
- EXPECT_TRUE(FromAPI(sampler.Get())->IsCachedReference());
}
}