Revert "Updates ContentLessObjectCache to use WeakRefs."

This reverts commit 5759b5539e7902149f31cd5c5392b504f5fd4cab.

Reason for revert: Breaking roll into Chromium.
Bug: chromium:1466134

Original change's description:
> Updates ContentLessObjectCache to use WeakRefs.
>
> - Re-implements the cache using WeakRefs.
> - Cleans up old Uncache* functions that are no longer needed.
> - Removes "cached" state from CachedObject since it's no longer needed.
> - Minor updates to related tests.
>
> Bug: dawn:1769
> Change-Id: I25cf5d3d5d3c9bcd793239db735304e74e9d6b76
> Reviewed-on: https://dawn-review.googlesource.com/c/dawn/+/139283
> Reviewed-by: Austin Eng <enga@chromium.org>
> Kokoro: Kokoro <noreply+kokoro@google.com>
> Reviewed-by: Corentin Wallez <cwallez@chromium.org>
> Commit-Queue: Loko Kung <lokokung@google.com>

TBR=cwallez@chromium.org,enga@chromium.org,noreply+kokoro@google.com,dawn-scoped@luci-project-accounts.iam.gserviceaccount.com,lokokung@google.com

Change-Id: Iead07169439f4dfa8a7ffb28a38baa3e0646a4b7
No-Presubmit: true
No-Tree-Checks: true
No-Try: true
Bug: dawn:1769
Reviewed-on: https://dawn-review.googlesource.com/c/dawn/+/141940
Reviewed-by: Loko Kung <lokokung@google.com>
Commit-Queue: Loko Kung <lokokung@google.com>
Kokoro: Austin Eng <enga@chromium.org>
Reviewed-by: Austin Eng <enga@chromium.org>
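
The net effect of this revert at the call sites: ContentLessObjectCache::Insert goes back to taking a raw pointer instead of a Ref, TryGetRef replaces WeakRef promotion, and the device resumes tracking cache membership through CachedObject::SetIsCachedReference. A minimal sketch of the restored insertion pattern, using a hypothetical Thing type standing in for the real cached types (SamplerBase, ShaderModuleBase, etc.), matching the Device.cpp hunks below:

    // Sketch only: `Thing`, `cache`, and `descriptor` are hypothetical stand-ins.
    Ref<Thing> thing = AcquireRef(new Thing(device, descriptor));
    auto [cached, inserted] = cache.Insert(thing.Get());  // raw pointer, not a Ref
    if (inserted) {
        // Restored bookkeeping: lets DestroyImpl know it must uncache later.
        cached->SetIsCachedReference();
    }
    return cached;  // either `thing` or the pre-existing content-equal object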
diff --git a/src/dawn/common/BUILD.gn b/src/dawn/common/BUILD.gn
index 6b72bb5..297565f 100644
--- a/src/dawn/common/BUILD.gn
+++ b/src/dawn/common/BUILD.gn
@@ -238,7 +238,6 @@
       "ConcurrentCache.h",
       "Constants.h",
       "ContentLessObjectCache.h",
-      "ContentLessObjectCacheable.h",
       "CoreFoundationRef.h",
       "DynamicLib.cpp",
       "DynamicLib.h",
diff --git a/src/dawn/common/CMakeLists.txt b/src/dawn/common/CMakeLists.txt
index ca78909..cb1b388 100644
--- a/src/dawn/common/CMakeLists.txt
+++ b/src/dawn/common/CMakeLists.txt
@@ -41,7 +41,6 @@
     "ConcurrentCache.h"
     "Constants.h"
     "ContentLessObjectCache.h"
-    "ContentLessObjectCacheable.h"
     "CoreFoundationRef.h"
     "DynamicLib.cpp"
     "DynamicLib.h"
diff --git a/src/dawn/common/ContentLessObjectCache.h b/src/dawn/common/ContentLessObjectCache.h
index 6654777..551f796 100644
--- a/src/dawn/common/ContentLessObjectCache.h
+++ b/src/dawn/common/ContentLessObjectCache.h
@@ -20,187 +20,68 @@
 #include <type_traits>
 #include <unordered_set>
 #include <utility>
-#include <variant>
 
-#include "dawn/common/ContentLessObjectCacheable.h"
 #include "dawn/common/Ref.h"
 #include "dawn/common/RefCounted.h"
-#include "dawn/common/WeakRef.h"
 
 namespace dawn {
-namespace detail {
 
-// Tagged-type to force special path for EqualityFunc when dealing with Erase. When erasing, we only
-// care about pointer equality, not value equality. This is also particularly important because
-// trying to promote on the Erase path can cause failures as the object's last ref could've been
-// dropped already.
-template <typename RefCountedT>
-struct ForErase {
-    explicit ForErase(RefCountedT* value) : mValue(value) {}
-    RefCountedT* mValue;
-};
-
-// All cached WeakRefs must have an immutable hash value determined at insertion. This ensures that
-// even if the last ref of the cached value is dropped, we still get the same hash in the set for
-// erasing.
-template <typename RefCountedT>
-using WeakRefAndHash = std::pair<WeakRef<RefCountedT>, size_t>;
-
-// The cache always holds WeakRefs internally, however, to enable lookups using pointers and special
-// Erase equality, we use a variant type to branch.
-template <typename RefCountedT>
-using ContentLessObjectCacheKey =
-    std::variant<RefCountedT*, WeakRefAndHash<RefCountedT>, ForErase<RefCountedT>>;
-
-enum class KeyType : size_t { Pointer = 0, WeakRef = 1, ForErase = 2 };
-
-template <typename RefCountedT>
-struct ContentLessObjectCacheHashVisitor {
-    using BaseHashFunc = typename RefCountedT::HashFunc;
-
-    size_t operator()(const RefCountedT* ptr) const { return BaseHashFunc()(ptr); }
-    size_t operator()(const WeakRefAndHash<RefCountedT>& weakref) const { return weakref.second; }
-    size_t operator()(const ForErase<RefCountedT>& forErase) const {
-        return BaseHashFunc()(forErase.mValue);
-    }
-};
-
-template <typename RefCountedT>
-struct ContentLessObjectCacheKeyFuncs {
-    static_assert(
-        std::is_same_v<RefCountedT*,
-                       std::variant_alternative_t<static_cast<size_t>(KeyType::Pointer),
-                                                  ContentLessObjectCacheKey<RefCountedT>>>);
-    static_assert(
-        std::is_same_v<WeakRefAndHash<RefCountedT>,
-                       std::variant_alternative_t<static_cast<size_t>(KeyType::WeakRef),
-                                                  ContentLessObjectCacheKey<RefCountedT>>>);
-    static_assert(
-        std::is_same_v<ForErase<RefCountedT>,
-                       std::variant_alternative_t<static_cast<size_t>(KeyType::ForErase),
-                                                  ContentLessObjectCacheKey<RefCountedT>>>);
-
-    struct HashFunc {
-        size_t operator()(const ContentLessObjectCacheKey<RefCountedT>& key) const {
-            return std::visit(ContentLessObjectCacheHashVisitor<RefCountedT>(), key);
-        }
-    };
-
-    using BaseEqualityFunc = typename RefCountedT::EqualityFunc;
-    struct EqualityFunc {
-        bool operator()(const ContentLessObjectCacheKey<RefCountedT>& a,
-                        const ContentLessObjectCacheKey<RefCountedT>& b) const {
-            // First check if we are in the erasing scenario. We need to determine this early
-            // because we handle the actual equality differently. Note that if either a or b is
-            // a ForErase, it is safe to use UnsafeGet for both a and b because either:
-            //   (1) a == b, in which case that means we are destroying the last copy and must be
-            //       valid because cached objects must uncache themselves before being completely
-            //       destroyed.
-            //   (2) a != b, in which case the lock on the cache guarantees that the element in the
-            //       cache has not been erased yet and hence cannot have been destroyed.
-            bool erasing = std::holds_alternative<ForErase<RefCountedT>>(a) ||
-                           std::holds_alternative<ForErase<RefCountedT>>(b);
-
-            auto ExtractKey = [](bool erasing, const ContentLessObjectCacheKey<RefCountedT>& x)
-                -> std::pair<RefCountedT*, Ref<RefCountedT>> {
-                RefCountedT* xPtr = nullptr;
-                Ref<RefCountedT> xRef;
-                switch (static_cast<KeyType>(x.index())) {
-                    case KeyType::Pointer:
-                        xPtr = std::get<RefCountedT*>(x);
-                        break;
-                    case KeyType::WeakRef:
-                        if (erasing) {
-                            xPtr = std::get<WeakRefAndHash<RefCountedT>>(x).first.UnsafeGet();
-                        } else {
-                            xRef = std::get<WeakRefAndHash<RefCountedT>>(x).first.Promote();
-                            xPtr = xRef.Get();
-                        }
-                        break;
-                    case KeyType::ForErase:
-                        xPtr = std::get<ForErase<RefCountedT>>(x).mValue;
-                        break;
-                    default:
-                        UNREACHABLE();
-                }
-                return {xPtr, xRef};
-            };
-
-            auto [aPtr, aRef] = ExtractKey(erasing, a);
-            auto [bPtr, bRef] = ExtractKey(erasing, b);
-            if (aPtr == nullptr || bPtr == nullptr) {
-                return false;
-            }
-            if (erasing) {
-                return aPtr == bPtr;
-            }
-            return BaseEqualityFunc()(aPtr, bPtr);
-        }
-    };
-};
-
-}  // namespace detail
-
+// The ContentLessObjectCache stores raw pointers to living RefCounted objects without adding to
+// their refcounts. This means that any RefCountedT inserted into the cache must make sure that its
+// DeleteThis function erases it from the cache. Otherwise, the cache can grow indefinitely and
+// hold dangling pointers to deleted objects.
 template <typename RefCountedT>
 class ContentLessObjectCache {
-    static_assert(std::is_base_of_v<detail::ContentLessObjectCacheableBase, RefCountedT>,
-                  "Type must be cacheable to use with ContentLessObjectCache.");
-    static_assert(std::is_base_of_v<RefCounted, RefCountedT>,
-                  "Type must be refcounted to use with ContentLessObjectCache.");
-
   public:
-    // The dtor asserts that the cache is empty to aid in finding pointer leaks that can be
-    // possible if the RefCountedT doesn't correctly implement the DeleteThis function to Uncache.
+    // The dtor asserts that the cache is empty to aid in finding pointer leaks that are possible
+    // if the RefCountedT doesn't correctly implement the DeleteThis function to erase itself from
+    // the cache.
     ~ContentLessObjectCache() { ASSERT(Empty()); }
 
-    // Inserts the object into the cache returning a pair where the first is a Ref to the
-    // inserted or existing object, and the second is a bool that is true if we inserted
-    // `object` and false otherwise.
-    std::pair<Ref<RefCountedT>, bool> Insert(Ref<RefCountedT> obj) {
+    // Inserts the object into the cache, returning a pair where the first element is a Ref to the
+    // inserted or existing object, and the second is a bool that is true if we inserted `object`
+    // and false otherwise.
+    std::pair<Ref<RefCountedT>, bool> Insert(RefCountedT* object) {
         std::lock_guard<std::mutex> lock(mMutex);
-        detail::WeakRefAndHash<RefCountedT> weakref =
-            std::make_pair(obj.GetWeakRef(), typename RefCountedT::HashFunc()(obj.Get()));
-        auto [it, inserted] = mCache.insert(weakref);
+        auto [it, inserted] = mCache.insert(object);
         if (inserted) {
-            obj->mCache = this;
-            return {obj, inserted};
+            return {object, inserted};
         } else {
-            // Try to promote the found WeakRef to a Ref. If promotion fails, remove the old Key
-            // and insert this one.
-            Ref<RefCountedT> ref =
-                std::get<detail::WeakRefAndHash<RefCountedT>>(*it).first.Promote();
+            // We need to check that the found instance isn't about to be destroyed. If it is, we
+            // actually want to remove the old instance from the cache and insert this one. This
+            // can happen if the last ref of the instance currently in the cache has already been
+            // dropped, but the deletion (and hence the Erase) hasn't completed yet.
+            Ref<RefCountedT> ref = TryGetRef(static_cast<RefCountedT*>(*it));
             if (ref != nullptr) {
                 return {ref, false};
             } else {
                 mCache.erase(it);
-                auto result = mCache.insert(weakref);
+                auto result = mCache.insert(object);
                 ASSERT(result.second);
-                obj->mCache = this;
-                return {obj, true};
+                return {object, true};
             }
         }
     }
 
-    // Returns a valid Ref<T> if we can Promote the underlying WeakRef. Returns nullptr otherwise.
+    // Returns a valid Ref<T> if the underlying RefCounted object's refcount has not reached 0.
+    // Otherwise, returns nullptr.
     Ref<RefCountedT> Find(RefCountedT* blueprint) {
         std::lock_guard<std::mutex> lock(mMutex);
         auto it = mCache.find(blueprint);
         if (it != mCache.end()) {
-            return std::get<detail::WeakRefAndHash<RefCountedT>>(*it).first.Promote();
+            return TryGetRef(static_cast<RefCountedT*>(*it));
         }
         return nullptr;
     }
 
     // Erases the object from the cache if a pointer-equal entry exists. Otherwise does not
     // modify the cache.
-    void Erase(RefCountedT* obj) {
+    void Erase(RefCountedT* object) {
         std::lock_guard<std::mutex> lock(mMutex);
-        auto it = mCache.find(detail::ForErase<RefCountedT>(obj));
-        if (it == mCache.end()) {
-            return;
+        auto it = mCache.find(object);
+        if (it != mCache.end() && *it == object) {
+            mCache.erase(it);
         }
-        obj->mCache = nullptr;
-        mCache.erase(it);
     }
 
     // Returns true iff the cache is empty.
@@ -211,9 +92,9 @@
 
   private:
     std::mutex mMutex;
-    std::unordered_set<detail::ContentLessObjectCacheKey<RefCountedT>,
-                       typename detail::ContentLessObjectCacheKeyFuncs<RefCountedT>::HashFunc,
-                       typename detail::ContentLessObjectCacheKeyFuncs<RefCountedT>::EqualityFunc>
+    std::unordered_set<RefCountedT*,
+                       typename RefCountedT::HashFunc,
+                       typename RefCountedT::EqualityFunc>
         mCache;
 };
 
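Since the restored cache holds raw pointers, every cached type must erase itself before its memory is freed; that is the invariant the comments above describe. A minimal sketch of the contract with a hypothetical CachedThing type (the real types below route this through DestroyImpl or a DeleteThis override):

    // Sketch only: CachedThing is hypothetical; the invariant is that Erase(this)
    // runs before the destructor so the set never observes a dangling pointer.
    class CachedThing : public RefCounted {
      public:
        struct HashFunc {
            size_t operator()(const CachedThing* t) const { return t->mValue; }
        };
        struct EqualityFunc {
            bool operator()(const CachedThing* a, const CachedThing* b) const {
                return a->mValue == b->mValue;
            }
        };

      protected:
        void DeleteThis() override {
            mCache->Erase(this);       // drop the raw pointer while `this` is alive
            RefCounted::DeleteThis();  // then perform the actual deletion
        }

      private:
        size_t mValue = 0;
        ContentLessObjectCache<CachedThing>* mCache = nullptr;
    };
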
diff --git a/src/dawn/common/ContentLessObjectCacheable.h b/src/dawn/common/ContentLessObjectCacheable.h
deleted file mode 100644
index eae50e9..0000000
--- a/src/dawn/common/ContentLessObjectCacheable.h
+++ /dev/null
@@ -1,63 +0,0 @@
-// Copyright 2023 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef SRC_DAWN_COMMON_CONTENTLESSOBJECTCACHEABLE_H_
-#define SRC_DAWN_COMMON_CONTENTLESSOBJECTCACHEABLE_H_
-
-#include "dawn/common/WeakRefSupport.h"
-
-namespace dawn {
-
-template <typename RefCountedT>
-class ContentLessObjectCache;
-
-namespace detail {
-
-// Placeholding base class for cacheable types to enable easier compile-time verifications.
-class ContentLessObjectCacheableBase {};
-
-}  // namespace detail
-
-// Classes need to extend this type if they want to be cacheable via the ContentLessObjectCache. It
-// is also assumed that the type already extends RefCounted in some way. Otherwise, this helper
-// class does not work.
-template <typename RefCountedT>
-class ContentLessObjectCacheable : public detail::ContentLessObjectCacheableBase,
-                                   public WeakRefSupport<RefCountedT> {
-  public:
-    // Currently, any cacheables should call Uncache in their DeleteThis override. This is important
-    // because otherwise, the objects may be leaked in the internal set.
-    void Uncache() {
-        if (mCache != nullptr) {
-            // Note that Erase sets mCache to nullptr. We do it in Erase instead of doing it here in
-            // case users call Erase somewhere else before the Uncache call.
-            mCache->Erase(static_cast<RefCountedT*>(this));
-        }
-    }
-
-  protected:
-    // The dtor asserts that the cache isn't set to ensure that we were Uncache-d or never cached.
-    ~ContentLessObjectCacheable() override { ASSERT(mCache == nullptr); }
-
-  private:
-    friend class ContentLessObjectCache<RefCountedT>;
-
-    // Pointer to the owning cache if we were inserted at any point. This is set via the
-    // Insert/Erase functions on the cache.
-    ContentLessObjectCache<RefCountedT>* mCache = nullptr;
-};
-
-}  // namespace dawn
-
-#endif  // SRC_DAWN_COMMON_CONTENTLESSOBJECTCACHEABLE_H_
diff --git a/src/dawn/common/RefCounted.h b/src/dawn/common/RefCounted.h
index 6971862..3424271 100644
--- a/src/dawn/common/RefCounted.h
+++ b/src/dawn/common/RefCounted.h
@@ -71,6 +71,20 @@
     // synchronization in place for destruction.
     void Release();
 
+    // Tries to return a valid Ref to `object` if its internal refcount is not already 0. If the
+    // internal refcount has already reached 0, returns nullptr instead.
+    // TODO(dawn:1769) Remove this once ContentLessObjectCache is converted to use WeakRefs.
+    template <typename T, typename = typename std::is_convertible<T, RefCounted>>
+    friend Ref<T> TryGetRef(T* object) {
+        // Since this is called on the RefCounted class directly, and can race with destruction, we
+        // first verify that we can safely increment the refcount (i.e. it has not already reached
+        // 0), then create the Ref by adopting that increment so that the resultant Ref is valid.
+        if (!object->mRefCount.TryIncrement()) {
+            return nullptr;
+        }
+        return AcquireRef(object);
+    }
+
     void APIReference() { Reference(); }
     // APIRelease() can be called without any synchronization guarantees so we need to use a Release
     // method that will call LockAndDeleteThis() on destruction.
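TryGetRef is what makes Find and Insert safe against racing destruction: a Ref is handed out only if the refcount is still non-zero. The diff does not show TryIncrement itself; the following is a simplified model of its semantics, assuming a compare-exchange loop (Dawn's real RefCount packs payload bits into the same word, so this is an illustration, not the actual implementation):

    #include <atomic>
    #include <cstdint>

    // Simplified model: increment only if currently non-zero, so a promotion can
    // never "resurrect" an object whose destruction has already begun.
    bool TryIncrement(std::atomic<uint64_t>& refCount) {
        uint64_t current = refCount.load(std::memory_order_relaxed);
        do {
            if (current == 0) {
                return false;  // too late: the last Release already happened
            }
            // compare_exchange_weak retries if another thread updated the count.
        } while (!refCount.compare_exchange_weak(current, current + 1,
                                                 std::memory_order_acquire,
                                                 std::memory_order_relaxed));
        return true;
    }
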
diff --git a/src/dawn/common/WeakRef.h b/src/dawn/common/WeakRef.h
index 26485f6..4764924 100644
--- a/src/dawn/common/WeakRef.h
+++ b/src/dawn/common/WeakRef.h
@@ -49,29 +49,15 @@
         return *this;
     }
 
-    // Constructor from explicit WeakRefSupport<T>* is allowed.
-    // NOLINTNEXTLINE(runtime/explicit)
-    WeakRef(WeakRefSupport<T>* support) : mData(support->mData) {}
-
     // Promotes a WeakRef to a Ref. Access to the raw pointer is not allowed because a raw pointer
     // could become invalid after being retrieved.
-    Ref<T> Promote() const {
+    Ref<T> Promote() {
         if (mData != nullptr) {
             return AcquireRef(static_cast<T*>(mData->TryGetRef().Detach()));
         }
         return nullptr;
     }
 
-    // Returns the raw pointer to the RefCountedT if it has not been invalidated. Note that this
-    // function is not thread-safe since the returned pointer can become invalid after being
-    // retrieved.
-    T* UnsafeGet() const {
-        if (mData != nullptr) {
-            return static_cast<T*>(mData->UnsafeGet());
-        }
-        return nullptr;
-    }
-
   private:
     // Friend is needed so that we can access the data ref in conversions.
     template <typename U>
diff --git a/src/dawn/common/WeakRefSupport.cpp b/src/dawn/common/WeakRefSupport.cpp
index a85a9d7..9273ff1 100644
--- a/src/dawn/common/WeakRefSupport.cpp
+++ b/src/dawn/common/WeakRefSupport.cpp
@@ -28,10 +28,6 @@
     return AcquireRef(mValue);
 }
 
-RefCounted* WeakRefData::UnsafeGet() const {
-    return mValue;
-}
-
 void WeakRefData::Invalidate() {
     std::lock_guard<std::mutex> lock(mMutex);
     mValue = nullptr;
diff --git a/src/dawn/common/WeakRefSupport.h b/src/dawn/common/WeakRefSupport.h
index de37965..9702905 100644
--- a/src/dawn/common/WeakRefSupport.h
+++ b/src/dawn/common/WeakRefSupport.h
@@ -34,10 +34,6 @@
     // internal refcount has already reached 0, returns nullptr instead.
     Ref<RefCounted> TryGetRef();
 
-    // Returns the raw pointer to the RefCounted. In general, this is an unsafe operation because
-    // the RefCounted can become invalid after being retrieved.
-    RefCounted* UnsafeGet() const;
-
   private:
     std::mutex mMutex;
     RefCounted* mValue = nullptr;
@@ -47,7 +43,7 @@
 class WeakRefSupportBase {
   protected:
     explicit WeakRefSupportBase(Ref<detail::WeakRefData> data);
-    virtual ~WeakRefSupportBase();
+    ~WeakRefSupportBase();
 
   private:
     template <typename T>
diff --git a/src/dawn/native/AttachmentState.cpp b/src/dawn/native/AttachmentState.cpp
index 9ece45d..1e7e370 100644
--- a/src/dawn/native/AttachmentState.cpp
+++ b/src/dawn/native/AttachmentState.cpp
@@ -122,9 +122,10 @@
     SetContentHash(blueprint.GetContentHash());
 }
 
-void AttachmentState::DeleteThis() {
-    Uncache();
-    RefCounted::DeleteThis();
+AttachmentState::~AttachmentState() {
+    if (IsCachedReference()) {
+        GetDevice()->UncacheAttachmentState(this);
+    }
 }
 
 bool AttachmentState::EqualityFunc::operator()(const AttachmentState* a,
diff --git a/src/dawn/native/AttachmentState.h b/src/dawn/native/AttachmentState.h
index c17faae..fcb0e43 100644
--- a/src/dawn/native/AttachmentState.h
+++ b/src/dawn/native/AttachmentState.h
@@ -19,7 +19,6 @@
 #include <bitset>
 
 #include "dawn/common/Constants.h"
-#include "dawn/common/ContentLessObjectCacheable.h"
 #include "dawn/common/ityp_array.h"
 #include "dawn/common/ityp_bitset.h"
 #include "dawn/native/CachedObject.h"
@@ -32,9 +31,7 @@
 
 class DeviceBase;
 
-class AttachmentState final : public ObjectBase,
-                              public CachedObject,
-                              public ContentLessObjectCacheable<AttachmentState> {
+class AttachmentState final : public ObjectBase, public CachedObject {
   public:
     // Note: Descriptors must be validated before the AttachmentState is constructed.
     explicit AttachmentState(DeviceBase* device, const RenderBundleEncoderDescriptor* descriptor);
@@ -44,6 +41,8 @@
     // Constructor used to avoid re-parsing descriptors when we already parsed them for cache keys.
     AttachmentState(const AttachmentState& blueprint);
 
+    ~AttachmentState() override;
+
     ityp::bitset<ColorAttachmentIndex, kMaxColorAttachments> GetColorAttachmentsMask() const;
     wgpu::TextureFormat GetColorAttachmentFormat(ColorAttachmentIndex index) const;
     bool HasDepthStencilAttachment() const;
@@ -56,9 +55,6 @@
     };
     size_t ComputeContentHash() override;
 
-  protected:
-    void DeleteThis() override;
-
   private:
     ityp::bitset<ColorAttachmentIndex, kMaxColorAttachments> mColorAttachmentsSet;
     ityp::array<ColorAttachmentIndex, wgpu::TextureFormat, kMaxColorAttachments> mColorFormats;
diff --git a/src/dawn/native/BindGroupLayout.cpp b/src/dawn/native/BindGroupLayout.cpp
index 6bb8c5f..4324548 100644
--- a/src/dawn/native/BindGroupLayout.cpp
+++ b/src/dawn/native/BindGroupLayout.cpp
@@ -505,7 +505,10 @@
 BindGroupLayoutBase::~BindGroupLayoutBase() = default;
 
 void BindGroupLayoutBase::DestroyImpl() {
-    Uncache();
+    if (IsCachedReference()) {
+        // Do not uncache the actual cached object if we are a blueprint.
+        GetDevice()->UncacheBindGroupLayout(this);
+    }
 }
 
 // static
diff --git a/src/dawn/native/BindGroupLayout.h b/src/dawn/native/BindGroupLayout.h
index 77042b9..c2ffb97 100644
--- a/src/dawn/native/BindGroupLayout.h
+++ b/src/dawn/native/BindGroupLayout.h
@@ -21,7 +21,6 @@
 #include <string>
 
 #include "dawn/common/Constants.h"
-#include "dawn/common/ContentLessObjectCacheable.h"
 #include "dawn/common/SlabAllocator.h"
 #include "dawn/common/ityp_span.h"
 #include "dawn/common/ityp_vector.h"
@@ -50,9 +49,7 @@
 // Bindings are specified as a |BindingNumber| in the BindGroupLayoutDescriptor.
 // These numbers may be arbitrary and sparse. Internally, Dawn packs these numbers
 // into a packed range of |BindingIndex| integers.
-class BindGroupLayoutBase : public ApiObjectBase,
-                            public CachedObject,
-                            public ContentLessObjectCacheable<BindGroupLayoutBase> {
+class BindGroupLayoutBase : public ApiObjectBase, public CachedObject {
   public:
     BindGroupLayoutBase(DeviceBase* device,
                         const BindGroupLayoutDescriptor* descriptor,
diff --git a/src/dawn/native/CachedObject.cpp b/src/dawn/native/CachedObject.cpp
index 3460bc1..bf2be11 100644
--- a/src/dawn/native/CachedObject.cpp
+++ b/src/dawn/native/CachedObject.cpp
@@ -19,6 +19,14 @@
 
 namespace dawn::native {
 
+bool CachedObject::IsCachedReference() const {
+    return mIsCachedReference;
+}
+
+void CachedObject::SetIsCachedReference() {
+    mIsCachedReference = true;
+}
+
 size_t CachedObject::HashFunc::operator()(const CachedObject* obj) const {
     return obj->GetContentHash();
 }
diff --git a/src/dawn/native/CachedObject.h b/src/dawn/native/CachedObject.h
index 5b7afec..1040f54 100644
--- a/src/dawn/native/CachedObject.h
+++ b/src/dawn/native/CachedObject.h
@@ -24,9 +24,13 @@
 namespace dawn::native {
 
 // Some objects are cached so that instead of creating new duplicate objects, we increase the
-// refcount of an existing object.
+// refcount of an existing object. When an object is successfully inserted into the cache, the
+// device should call SetIsCachedReference() on it.
 class CachedObject {
   public:
+    bool IsCachedReference() const;
+    void SetIsCachedReference();
+
     // Functor necessary for the unordered_set<CachedObject*>-based cache.
     struct HashFunc {
         size_t operator()(const CachedObject* obj) const;
@@ -46,6 +50,8 @@
     // Called by ObjectContentHasher upon creation to record the object.
     virtual size_t ComputeContentHash() = 0;
 
+    bool mIsCachedReference = false;
+
     size_t mContentHash = 0;
     bool mIsContentHashInitialized = false;
 };
diff --git a/src/dawn/native/ComputePipeline.cpp b/src/dawn/native/ComputePipeline.cpp
index 9916a0a..99364e7 100644
--- a/src/dawn/native/ComputePipeline.cpp
+++ b/src/dawn/native/ComputePipeline.cpp
@@ -64,7 +64,10 @@
 ComputePipelineBase::~ComputePipelineBase() = default;
 
 void ComputePipelineBase::DestroyImpl() {
-    Uncache();
+    if (IsCachedReference()) {
+        // Do not uncache the actual cached object if we are a blueprint.
+        GetDevice()->UncacheComputePipeline(this);
+    }
 }
 
 // static
diff --git a/src/dawn/native/ComputePipeline.h b/src/dawn/native/ComputePipeline.h
index 47662ba..85794ec 100644
--- a/src/dawn/native/ComputePipeline.h
+++ b/src/dawn/native/ComputePipeline.h
@@ -15,7 +15,6 @@
 #ifndef SRC_DAWN_NATIVE_COMPUTEPIPELINE_H_
 #define SRC_DAWN_NATIVE_COMPUTEPIPELINE_H_
 
-#include "dawn/common/ContentLessObjectCacheable.h"
 #include "dawn/common/NonCopyable.h"
 #include "dawn/native/Forward.h"
 #include "dawn/native/Pipeline.h"
@@ -28,8 +27,7 @@
 MaybeError ValidateComputePipelineDescriptor(DeviceBase* device,
                                              const ComputePipelineDescriptor* descriptor);
 
-class ComputePipelineBase : public PipelineBase,
-                            public ContentLessObjectCacheable<ComputePipelineBase> {
+class ComputePipelineBase : public PipelineBase {
   public:
     ComputePipelineBase(DeviceBase* device, const ComputePipelineDescriptor* descriptor);
     ~ComputePipelineBase() override;
diff --git a/src/dawn/native/Device.cpp b/src/dawn/native/Device.cpp
index 72e81e6..2a77e66 100644
--- a/src/dawn/native/Device.cpp
+++ b/src/dawn/native/Device.cpp
@@ -103,6 +103,9 @@
 
     bool inserted = false;
     std::tie(result, inserted) = cache.Insert(result.Get());
+    if (inserted) {
+        result->SetIsCachedReference();
+    }
     return ReturnType(result);
 }
 
@@ -853,6 +856,11 @@
         });
 }
 
+void DeviceBase::UncacheBindGroupLayout(BindGroupLayoutBase* obj) {
+    ASSERT(obj->IsCachedReference());
+    mCaches->bindGroupLayouts.Erase(obj);
+}
+
 // Private function used at initialization
 ResultOrError<Ref<BindGroupLayoutBase>> DeviceBase::CreateEmptyBindGroupLayout() {
     BindGroupLayoutDescriptor desc = {};
@@ -893,15 +901,30 @@
 Ref<ComputePipelineBase> DeviceBase::AddOrGetCachedComputePipeline(
     Ref<ComputePipelineBase> computePipeline) {
     ASSERT(IsLockedByCurrentThreadIfNeeded());
-    auto [pipeline, _] = mCaches->computePipelines.Insert(computePipeline.Get());
-    return std::move(pipeline);
+    auto [cachedPipeline, inserted] = mCaches->computePipelines.Insert(computePipeline.Get());
+    if (inserted) {
+        computePipeline->SetIsCachedReference();
+        return computePipeline;
+    } else {
+        return std::move(cachedPipeline);
+    }
 }
 
 Ref<RenderPipelineBase> DeviceBase::AddOrGetCachedRenderPipeline(
     Ref<RenderPipelineBase> renderPipeline) {
     ASSERT(IsLockedByCurrentThreadIfNeeded());
-    auto [pipeline, _] = mCaches->renderPipelines.Insert(renderPipeline.Get());
-    return std::move(pipeline);
+    auto [cachedPipeline, inserted] = mCaches->renderPipelines.Insert(renderPipeline.Get());
+    if (inserted) {
+        renderPipeline->SetIsCachedReference();
+        return renderPipeline;
+    } else {
+        return std::move(cachedPipeline);
+    }
+}
+
+void DeviceBase::UncacheComputePipeline(ComputePipelineBase* obj) {
+    ASSERT(obj->IsCachedReference());
+    mCaches->computePipelines.Erase(obj);
 }
 
 ResultOrError<Ref<TextureViewBase>> DeviceBase::CreateImplicitMSAARenderTextureViewFor(
@@ -974,6 +997,16 @@
                        });
 }
 
+void DeviceBase::UncachePipelineLayout(PipelineLayoutBase* obj) {
+    ASSERT(obj->IsCachedReference());
+    mCaches->pipelineLayouts.Erase(obj);
+}
+
+void DeviceBase::UncacheRenderPipeline(RenderPipelineBase* obj) {
+    ASSERT(obj->IsCachedReference());
+    mCaches->renderPipelines.Erase(obj);
+}
+
 ResultOrError<Ref<SamplerBase>> DeviceBase::GetOrCreateSampler(
     const SamplerDescriptor* descriptor) {
     SamplerBase blueprint(this, descriptor, ApiObjectBase::kUntrackedByDevice);
@@ -989,6 +1022,11 @@
     });
 }
 
+void DeviceBase::UncacheSampler(SamplerBase* obj) {
+    ASSERT(obj->IsCachedReference());
+    mCaches->samplers.Erase(obj);
+}
+
 ResultOrError<Ref<ShaderModuleBase>> DeviceBase::GetOrCreateShaderModule(
     const ShaderModuleDescriptor* descriptor,
     ShaderModuleParseResult* parseResult,
@@ -1025,9 +1063,15 @@
         });
 }
 
+void DeviceBase::UncacheShaderModule(ShaderModuleBase* obj) {
+    ASSERT(obj->IsCachedReference());
+    mCaches->shaderModules.Erase(obj);
+}
+
 Ref<AttachmentState> DeviceBase::GetOrCreateAttachmentState(AttachmentState* blueprint) {
     return GetOrCreate(mCaches->attachmentStates, blueprint, [&]() -> Ref<AttachmentState> {
-        return AcquireRef(new AttachmentState(*blueprint));
+        Ref<AttachmentState> attachmentState = AcquireRef(new AttachmentState(*blueprint));
+        return attachmentState;
     });
 }
 
@@ -1049,6 +1093,11 @@
     return GetOrCreateAttachmentState(&blueprint);
 }
 
+void DeviceBase::UncacheAttachmentState(AttachmentState* obj) {
+    ASSERT(obj->IsCachedReference());
+    mCaches->attachmentStates.Erase(obj);
+}
+
 Ref<PipelineCacheBase> DeviceBase::GetOrCreatePipelineCache(const CacheKey& key) {
     return GetOrCreatePipelineCacheImpl(key);
 }
diff --git a/src/dawn/native/Device.h b/src/dawn/native/Device.h
index 03ee648..0154474 100644
--- a/src/dawn/native/Device.h
+++ b/src/dawn/native/Device.h
@@ -195,10 +195,13 @@
     ResultOrError<Ref<BindGroupLayoutBase>> GetOrCreateBindGroupLayout(
         const BindGroupLayoutDescriptor* descriptor,
         PipelineCompatibilityToken pipelineCompatibilityToken = PipelineCompatibilityToken(0));
+    void UncacheBindGroupLayout(BindGroupLayoutBase* obj);
 
     BindGroupLayoutBase* GetEmptyBindGroupLayout();
     PipelineLayoutBase* GetEmptyPipelineLayout();
 
+    void UncacheComputePipeline(ComputePipelineBase* obj);
+
     ResultOrError<Ref<TextureViewBase>> CreateImplicitMSAARenderTextureViewFor(
         const TextureBase* singleSampledTexture,
         uint32_t sampleCount);
@@ -207,19 +210,25 @@
 
     ResultOrError<Ref<PipelineLayoutBase>> GetOrCreatePipelineLayout(
         const PipelineLayoutDescriptor* descriptor);
+    void UncachePipelineLayout(PipelineLayoutBase* obj);
+
+    void UncacheRenderPipeline(RenderPipelineBase* obj);
 
     ResultOrError<Ref<SamplerBase>> GetOrCreateSampler(const SamplerDescriptor* descriptor);
+    void UncacheSampler(SamplerBase* obj);
 
     ResultOrError<Ref<ShaderModuleBase>> GetOrCreateShaderModule(
         const ShaderModuleDescriptor* descriptor,
         ShaderModuleParseResult* parseResult,
         OwnedCompilationMessages* compilationMessages);
+    void UncacheShaderModule(ShaderModuleBase* obj);
 
     Ref<AttachmentState> GetOrCreateAttachmentState(AttachmentState* blueprint);
     Ref<AttachmentState> GetOrCreateAttachmentState(
         const RenderBundleEncoderDescriptor* descriptor);
     Ref<AttachmentState> GetOrCreateAttachmentState(const RenderPipelineDescriptor* descriptor);
     Ref<AttachmentState> GetOrCreateAttachmentState(const RenderPassDescriptor* descriptor);
+    void UncacheAttachmentState(AttachmentState* obj);
 
     Ref<PipelineCacheBase> GetOrCreatePipelineCache(const CacheKey& key);
 
diff --git a/src/dawn/native/PipelineLayout.cpp b/src/dawn/native/PipelineLayout.cpp
index 839efe9..c11a86b 100644
--- a/src/dawn/native/PipelineLayout.cpp
+++ b/src/dawn/native/PipelineLayout.cpp
@@ -84,7 +84,10 @@
 PipelineLayoutBase::~PipelineLayoutBase() = default;
 
 void PipelineLayoutBase::DestroyImpl() {
-    Uncache();
+    if (IsCachedReference()) {
+        // Do not uncache the actual cached object if we are a blueprint.
+        GetDevice()->UncachePipelineLayout(this);
+    }
 }
 
 // static
diff --git a/src/dawn/native/PipelineLayout.h b/src/dawn/native/PipelineLayout.h
index 6f89786..cd23026 100644
--- a/src/dawn/native/PipelineLayout.h
+++ b/src/dawn/native/PipelineLayout.h
@@ -21,7 +21,6 @@
 #include <vector>
 
 #include "dawn/common/Constants.h"
-#include "dawn/common/ContentLessObjectCacheable.h"
 #include "dawn/common/ityp_array.h"
 #include "dawn/common/ityp_bitset.h"
 #include "dawn/native/BindingInfo.h"
@@ -50,9 +49,7 @@
     ConstantEntry const* constants = nullptr;
 };
 
-class PipelineLayoutBase : public ApiObjectBase,
-                           public CachedObject,
-                           public ContentLessObjectCacheable<PipelineLayoutBase> {
+class PipelineLayoutBase : public ApiObjectBase, public CachedObject {
   public:
     PipelineLayoutBase(DeviceBase* device,
                        const PipelineLayoutDescriptor* descriptor,
diff --git a/src/dawn/native/RenderPipeline.cpp b/src/dawn/native/RenderPipeline.cpp
index e8c7e4f..33dd20d 100644
--- a/src/dawn/native/RenderPipeline.cpp
+++ b/src/dawn/native/RenderPipeline.cpp
@@ -807,7 +807,10 @@
 RenderPipelineBase::~RenderPipelineBase() = default;
 
 void RenderPipelineBase::DestroyImpl() {
-    Uncache();
+    if (IsCachedReference()) {
+        // Do not uncache the actual cached object if we are a blueprint.
+        GetDevice()->UncacheRenderPipeline(this);
+    }
 
     // Remove reference to the attachment state so that we don't have lingering references to
     // it preventing it from being uncached in the device.
diff --git a/src/dawn/native/RenderPipeline.h b/src/dawn/native/RenderPipeline.h
index 0408a98..ad5593a 100644
--- a/src/dawn/native/RenderPipeline.h
+++ b/src/dawn/native/RenderPipeline.h
@@ -19,7 +19,6 @@
 #include <bitset>
 #include <vector>
 
-#include "dawn/common/ContentLessObjectCacheable.h"
 #include "dawn/common/TypedInteger.h"
 #include "dawn/native/AttachmentState.h"
 #include "dawn/native/Forward.h"
@@ -61,8 +60,7 @@
     uint64_t lastStride;
 };
 
-class RenderPipelineBase : public PipelineBase,
-                           public ContentLessObjectCacheable<RenderPipelineBase> {
+class RenderPipelineBase : public PipelineBase {
   public:
     RenderPipelineBase(DeviceBase* device, const RenderPipelineDescriptor* descriptor);
     ~RenderPipelineBase() override;
diff --git a/src/dawn/native/Sampler.cpp b/src/dawn/native/Sampler.cpp
index b45255a..4e44d73 100644
--- a/src/dawn/native/Sampler.cpp
+++ b/src/dawn/native/Sampler.cpp
@@ -95,7 +95,10 @@
 SamplerBase::~SamplerBase() = default;
 
 void SamplerBase::DestroyImpl() {
-    Uncache();
+    if (IsCachedReference()) {
+        // Do not uncache the actual cached object if we are a blueprint.
+        GetDevice()->UncacheSampler(this);
+    }
 }
 
 // static
diff --git a/src/dawn/native/Sampler.h b/src/dawn/native/Sampler.h
index 08c8bf8..93e6b58 100644
--- a/src/dawn/native/Sampler.h
+++ b/src/dawn/native/Sampler.h
@@ -15,7 +15,6 @@
 #ifndef SRC_DAWN_NATIVE_SAMPLER_H_
 #define SRC_DAWN_NATIVE_SAMPLER_H_
 
-#include "dawn/common/ContentLessObjectCacheable.h"
 #include "dawn/native/CachedObject.h"
 #include "dawn/native/Error.h"
 #include "dawn/native/Forward.h"
@@ -29,9 +28,7 @@
 
 MaybeError ValidateSamplerDescriptor(DeviceBase* device, const SamplerDescriptor* descriptor);
 
-class SamplerBase : public ApiObjectBase,
-                    public CachedObject,
-                    public ContentLessObjectCacheable<SamplerBase> {
+class SamplerBase : public ApiObjectBase, public CachedObject {
   public:
     SamplerBase(DeviceBase* device,
                 const SamplerDescriptor* descriptor,
diff --git a/src/dawn/native/ShaderModule.cpp b/src/dawn/native/ShaderModule.cpp
index fb9279b..ece5cd9 100644
--- a/src/dawn/native/ShaderModule.cpp
+++ b/src/dawn/native/ShaderModule.cpp
@@ -1124,7 +1124,10 @@
 ShaderModuleBase::~ShaderModuleBase() = default;
 
 void ShaderModuleBase::DestroyImpl() {
-    Uncache();
+    if (IsCachedReference()) {
+        // Do not uncache the actual cached object if we are a blueprint.
+        GetDevice()->UncacheShaderModule(this);
+    }
 }
 
 // static
diff --git a/src/dawn/native/ShaderModule.h b/src/dawn/native/ShaderModule.h
index 16be99a..da58ad9 100644
--- a/src/dawn/native/ShaderModule.h
+++ b/src/dawn/native/ShaderModule.h
@@ -25,7 +25,6 @@
 #include <vector>
 
 #include "dawn/common/Constants.h"
-#include "dawn/common/ContentLessObjectCacheable.h"
 #include "dawn/common/ityp_array.h"
 #include "dawn/native/BindingInfo.h"
 #include "dawn/native/CachedObject.h"
@@ -254,9 +253,7 @@
     bool usesSampleMaskOutput = false;
 };
 
-class ShaderModuleBase : public ApiObjectBase,
-                         public CachedObject,
-                         public ContentLessObjectCacheable<ShaderModuleBase> {
+class ShaderModuleBase : public ApiObjectBase, public CachedObject {
   public:
     ShaderModuleBase(DeviceBase* device,
                      const ShaderModuleDescriptor* descriptor,
diff --git a/src/dawn/tests/unittests/ContentLessObjectCacheTests.cpp b/src/dawn/tests/unittests/ContentLessObjectCacheTests.cpp
index 7ecacd5..4ad91c6 100644
--- a/src/dawn/tests/unittests/ContentLessObjectCacheTests.cpp
+++ b/src/dawn/tests/unittests/ContentLessObjectCacheTests.cpp
@@ -28,72 +28,75 @@
 
 using utils::BinarySemaphore;
 
-class CacheableT : public RefCounted, public ContentLessObjectCacheable<CacheableT> {
+class RefCountedT : public RefCounted {
   public:
-    explicit CacheableT(size_t value) : mValue(value) {}
-    CacheableT(size_t value, std::function<void(CacheableT*)> deleteFn)
+    explicit RefCountedT(size_t value) : mValue(value) {}
+    RefCountedT(size_t value, std::function<void(RefCountedT*)> deleteFn)
         : mValue(value), mDeleteFn(deleteFn) {}
 
-    ~CacheableT() override { mDeleteFn(this); }
+    ~RefCountedT() override { mDeleteFn(this); }
 
     struct HashFunc {
-        size_t operator()(const CacheableT* x) const { return x->mValue; }
+        size_t operator()(const RefCountedT* x) const { return x->mValue; }
     };
 
     struct EqualityFunc {
-        bool operator()(const CacheableT* l, const CacheableT* r) const {
+        bool operator()(const RefCountedT* l, const RefCountedT* r) const {
             return l->mValue == r->mValue;
         }
     };
 
   private:
     size_t mValue;
-    std::function<void(CacheableT*)> mDeleteFn = [](CacheableT*) -> void {};
+    std::function<void(RefCountedT*)> mDeleteFn = [](RefCountedT*) -> void {};
 };
 
 // Empty cache returns true on Empty().
 TEST(ContentLessObjectCacheTest, Empty) {
-    ContentLessObjectCache<CacheableT> cache;
+    ContentLessObjectCache<RefCountedT> cache;
     EXPECT_TRUE(cache.Empty());
 }
 
 // Non-empty cache returns false on Empty().
 TEST(ContentLessObjectCacheTest, NonEmpty) {
-    ContentLessObjectCache<CacheableT> cache;
-    Ref<CacheableT> object = AcquireRef(new CacheableT(1, [&](CacheableT* x) { cache.Erase(x); }));
-    EXPECT_TRUE(cache.Insert(object).second);
+    ContentLessObjectCache<RefCountedT> cache;
+    Ref<RefCountedT> object =
+        AcquireRef(new RefCountedT(1, [&](RefCountedT* x) { cache.Erase(x); }));
+    EXPECT_TRUE(cache.Insert(object.Get()).second);
     EXPECT_FALSE(cache.Empty());
 }
 
 // Objects inserted into the cache are findable.
 TEST(ContentLessObjectCacheTest, Insert) {
-    ContentLessObjectCache<CacheableT> cache;
-    Ref<CacheableT> object = AcquireRef(new CacheableT(1, [&](CacheableT* x) { cache.Erase(x); }));
+    ContentLessObjectCache<RefCountedT> cache;
+    Ref<RefCountedT> object =
+        AcquireRef(new RefCountedT(1, [&](RefCountedT* x) { cache.Erase(x); }));
     EXPECT_TRUE(cache.Insert(object.Get()).second);
 
-    CacheableT blueprint(1);
-    Ref<CacheableT> cached = cache.Find(&blueprint);
+    RefCountedT blueprint(1);
+    Ref<RefCountedT> cached = cache.Find(&blueprint);
     EXPECT_TRUE(object.Get() == cached.Get());
 }
 
 // Duplicate Insert calls on different objects with the same hash only insert the first.
 TEST(ContentLessObjectCacheTest, InsertDuplicate) {
-    ContentLessObjectCache<CacheableT> cache;
-    Ref<CacheableT> object1 = AcquireRef(new CacheableT(1, [&](CacheableT* x) { cache.Erase(x); }));
+    ContentLessObjectCache<RefCountedT> cache;
+    Ref<RefCountedT> object1 =
+        AcquireRef(new RefCountedT(1, [&](RefCountedT* x) { cache.Erase(x); }));
     EXPECT_TRUE(cache.Insert(object1.Get()).second);
 
-    Ref<CacheableT> object2 = AcquireRef(new CacheableT(1));
+    Ref<RefCountedT> object2 = AcquireRef(new RefCountedT(1));
     EXPECT_FALSE(cache.Insert(object2.Get()).second);
 
-    CacheableT blueprint(1);
-    Ref<CacheableT> cached = cache.Find(&blueprint);
+    RefCountedT blueprint(1);
+    Ref<RefCountedT> cached = cache.Find(&blueprint);
     EXPECT_TRUE(object1.Get() == cached.Get());
 }
 
 // Erasing the only entry leaves the cache empty.
 TEST(ContentLessObjectCacheTest, Erase) {
-    ContentLessObjectCache<CacheableT> cache;
-    Ref<CacheableT> object = AcquireRef(new CacheableT(1));
+    ContentLessObjectCache<RefCountedT> cache;
+    Ref<RefCountedT> object = AcquireRef(new RefCountedT(1));
     EXPECT_TRUE(cache.Insert(object.Get()).second);
     EXPECT_FALSE(cache.Empty());
 
@@ -103,12 +106,13 @@
 
 // Erasing a hash-equivalent but not pointer-equivalent entry is a no-op.
 TEST(ContentLessObjectCacheTest, EraseDuplicate) {
-    ContentLessObjectCache<CacheableT> cache;
-    Ref<CacheableT> object1 = AcquireRef(new CacheableT(1, [&](CacheableT* x) { cache.Erase(x); }));
+    ContentLessObjectCache<RefCountedT> cache;
+    Ref<RefCountedT> object1 =
+        AcquireRef(new RefCountedT(1, [&](RefCountedT* x) { cache.Erase(x); }));
     EXPECT_TRUE(cache.Insert(object1.Get()).second);
     EXPECT_FALSE(cache.Empty());
 
-    Ref<CacheableT> object2 = AcquireRef(new CacheableT(1));
+    Ref<RefCountedT> object2 = AcquireRef(new RefCountedT(1));
     cache.Erase(object2.Get());
     EXPECT_FALSE(cache.Empty());
 }
@@ -117,21 +121,21 @@
 TEST(ContentLessObjectCacheTest, InsertingAndFinding) {
     constexpr size_t kNumObjects = 100;
     constexpr size_t kNumThreads = 8;
-    ContentLessObjectCache<CacheableT> cache;
-    std::vector<Ref<CacheableT>> objects(kNumObjects);
+    ContentLessObjectCache<RefCountedT> cache;
+    std::vector<Ref<RefCountedT>> objects(kNumObjects);
 
     auto f = [&] {
         for (size_t i = 0; i < kNumObjects; i++) {
-            Ref<CacheableT> object =
-                AcquireRef(new CacheableT(i, [&](CacheableT* x) { cache.Erase(x); }));
+            Ref<RefCountedT> object =
+                AcquireRef(new RefCountedT(i, [&](RefCountedT* x) { cache.Erase(x); }));
             if (cache.Insert(object.Get()).second) {
                 // This shouldn't race because exactly 1 thread should successfully insert.
                 objects[i] = object;
             }
         }
         for (size_t i = 0; i < kNumObjects; i++) {
-            CacheableT blueprint(i);
-            Ref<CacheableT> cached = cache.Find(&blueprint);
+            RefCountedT blueprint(i);
+            Ref<RefCountedT> cached = cache.Find(&blueprint);
             EXPECT_NE(cached.Get(), nullptr);
             EXPECT_EQ(cached.Get(), objects[i].Get());
         }
@@ -150,8 +154,8 @@
 TEST(ContentLessObjectCacheTest, FindDeleting) {
     BinarySemaphore semA, semB;
 
-    ContentLessObjectCache<CacheableT> cache;
-    Ref<CacheableT> object = AcquireRef(new CacheableT(1, [&](CacheableT* x) {
+    ContentLessObjectCache<RefCountedT> cache;
+    Ref<RefCountedT> object = AcquireRef(new RefCountedT(1, [&](RefCountedT* x) {
         semA.Release();
         semB.Acquire();
         cache.Erase(x);
@@ -163,7 +167,7 @@
     // Thread B will try to Find the entry before it is completely destroyed.
     auto threadB = [&] {
         semA.Acquire();
-        CacheableT blueprint(1);
+        RefCountedT blueprint(1);
         EXPECT_TRUE(cache.Find(&blueprint) == nullptr);
         semB.Release();
     };
@@ -179,15 +183,16 @@
 TEST(ContentLessObjectCacheTest, InsertDeleting) {
     BinarySemaphore semA, semB;
 
-    ContentLessObjectCache<CacheableT> cache;
-    Ref<CacheableT> object1 = AcquireRef(new CacheableT(1, [&](CacheableT* x) {
+    ContentLessObjectCache<RefCountedT> cache;
+    Ref<RefCountedT> object1 = AcquireRef(new RefCountedT(1, [&](RefCountedT* x) {
         semA.Release();
         semB.Acquire();
         cache.Erase(x);
     }));
     EXPECT_TRUE(cache.Insert(object1.Get()).second);
 
-    Ref<CacheableT> object2 = AcquireRef(new CacheableT(1, [&](CacheableT* x) { cache.Erase(x); }));
+    Ref<RefCountedT> object2 =
+        AcquireRef(new RefCountedT(1, [&](RefCountedT* x) { cache.Erase(x); }));
 
     // Thread A will release the last reference of the original object.
     auto threadA = [&] { object1 = nullptr; };
@@ -204,8 +209,8 @@
     tA.join();
     tB.join();
 
-    CacheableT blueprint(1);
-    Ref<CacheableT> cached = cache.Find(&blueprint);
+    RefCountedT blueprint(1);
+    Ref<RefCountedT> cached = cache.Find(&blueprint);
     EXPECT_TRUE(object2.Get() == cached.Get());
 }
 
diff --git a/src/dawn/tests/unittests/native/DestroyObjectTests.cpp b/src/dawn/tests/unittests/native/DestroyObjectTests.cpp
index 3bc14e5..74d8054 100644
--- a/src/dawn/tests/unittests/native/DestroyObjectTests.cpp
+++ b/src/dawn/tests/unittests/native/DestroyObjectTests.cpp
@@ -162,6 +162,7 @@
         wgpu::BindGroupLayout bindGroupLayout = device.CreateBindGroupLayout(ToCppAPI(&desc));
 
         EXPECT_TRUE(FromAPI(bindGroupLayout.Get())->IsAlive());
+        EXPECT_TRUE(FromAPI(bindGroupLayout.Get())->IsCachedReference());
     }
 }
 
@@ -342,6 +343,7 @@
         wgpu::ComputePipeline computePipeline = device.CreateComputePipeline(ToCppAPI(&desc));
 
         EXPECT_TRUE(FromAPI(computePipeline.Get())->IsAlive());
+        EXPECT_TRUE(FromAPI(computePipeline.Get())->IsCachedReference());
     }
 }
 
@@ -480,6 +482,7 @@
         wgpu::PipelineLayout pipelineLayout = device.CreatePipelineLayout(ToCppAPI(&desc));
 
         EXPECT_TRUE(FromAPI(pipelineLayout.Get())->IsAlive());
+        EXPECT_TRUE(FromAPI(pipelineLayout.Get())->IsCachedReference());
     }
 }
 
@@ -569,6 +572,7 @@
         wgpu::RenderPipeline renderPipeline = device.CreateRenderPipeline(ToCppAPI(&desc));
 
         EXPECT_TRUE(FromAPI(renderPipeline.Get())->IsAlive());
+        EXPECT_TRUE(FromAPI(renderPipeline.Get())->IsCachedReference());
     }
 }
 
@@ -598,6 +602,7 @@
         wgpu::Sampler sampler = device.CreateSampler(ToCppAPI(&desc));
 
         EXPECT_TRUE(FromAPI(sampler.Get())->IsAlive());
+        EXPECT_TRUE(FromAPI(sampler.Get())->IsCachedReference());
     }
 }