Switch ContentLessObjectCache to absl::flat_hash_set
absl::flat_hash_set seems to do one extra equality comparison in
our tests. Adjust the unittests so that tests with multiple
threads only fake the thread ordering once to avoid deadlock.
Bug: dawn:1513
Change-Id: I22c307ea54be4fad158f87e86ffbe61bee6f5ac9
Reviewed-on: https://dawn-review.googlesource.com/c/dawn/+/185241
Reviewed-by: Corentin Wallez <cwallez@chromium.org>
Commit-Queue: Austin Eng <enga@chromium.org>
diff --git a/src/dawn/common/ContentLessObjectCache.h b/src/dawn/common/ContentLessObjectCache.h
index 8a7a755..ffaf447 100644
--- a/src/dawn/common/ContentLessObjectCache.h
+++ b/src/dawn/common/ContentLessObjectCache.h
@@ -31,10 +31,10 @@
#include <mutex>
#include <tuple>
#include <type_traits>
-#include <unordered_set>
#include <utility>
#include <variant>
+#include "absl/container/flat_hash_set.h"
#include "dawn/common/ContentLessObjectCacheable.h"
#include "dawn/common/Ref.h"
#include "dawn/common/RefCounted.h"
@@ -182,11 +182,8 @@
using CacheKeyFuncs = detail::ContentLessObjectCacheKeyFuncs<RefCountedT>;
public:
- // Constructor needs to pass in 'this' to the EqualityFunc. Since the default bucket_count on
- // sets is implementation defined, creating a temporary unused set to get the value. The actual
- // type of the temporary does not matter.
ContentLessObjectCache()
- : mCache(std::unordered_set<int>().bucket_count(),
+ : mCache(/*capacity=*/0,
typename CacheKeyFuncs::HashFunc(),
typename CacheKeyFuncs::EqualityFunc(this)) {}
@@ -274,17 +271,17 @@
}
std::mutex mMutex;
- std::unordered_set<detail::ContentLessObjectCacheKey<RefCountedT>,
- typename CacheKeyFuncs::HashFunc,
- typename CacheKeyFuncs::EqualityFunc>
+ absl::flat_hash_set<detail::ContentLessObjectCacheKey<RefCountedT>,
+ typename CacheKeyFuncs::HashFunc,
+ typename CacheKeyFuncs::EqualityFunc>
mCache;
// The cache has a pointer to a StackVector of temporary Refs that are by-products of Promotes
// inside the EqualityFunc. These Refs need to outlive the EqualityFunc calls because otherwise,
// they could be the last living Ref of the object resulting in a re-entrant Erase call that
- // deadlocks on the mutex. Since the default max_load_factor of most std::unordered_set
- // implementations should be 1.0 (roughly 1 element per bucket), a StackVector of length 4
- // should be enough space in most cases. See dawn:1993 for more details.
+ // deadlocks on the mutex.
+ // Absl should make fewer than one equality check per set operation, so a StackVector of length
+ // 4 should be sufficient for most cases. See dawn:1993 for more details.
raw_ptr<StackVector<Ref<RefCountedT>, 4>> mTemporaryRefs = nullptr;
};
diff --git a/src/dawn/tests/unittests/ContentLessObjectCacheTests.cpp b/src/dawn/tests/unittests/ContentLessObjectCacheTests.cpp
index 477fced..5b3ca1e 100644
--- a/src/dawn/tests/unittests/ContentLessObjectCacheTests.cpp
+++ b/src/dawn/tests/unittests/ContentLessObjectCacheTests.cpp
@@ -233,13 +233,17 @@
ContentLessObjectCache<CacheableT> cache;
Ref<CacheableT> object = AcquireRef(new CacheableT(1));
object->SetDeleteFn([&](CacheableT* x) { cache.Erase(x); });
- object->SetEqualFn([&](const CacheableT* x) {
- semA.Release();
- semB.Acquire();
- });
EXPECT_TRUE(cache.Insert(object.Get()).second);
CacheableT* objectPtr = object.Get();
+ std::atomic_flag eqOnceFlag;
+ object->SetEqualFn([&](const CacheableT* x) {
+ if (!eqOnceFlag.test_and_set()) {
+ semA.Release();
+ semB.Acquire();
+ }
+ });
+
// Thread A will release the last reference of the original object after the object has been
// promoted internally for equality check.
auto threadA = [&] {
@@ -268,12 +272,16 @@
ContentLessObjectCache<CacheableT> cache;
Ref<CacheableT> object = AcquireRef(new CacheableT(1, 1));
object->SetDeleteFn([&](CacheableT* x) { cache.Erase(x); });
- object->SetEqualFn([&](const CacheableT* x) {
- semA.Release();
- semB.Acquire();
- });
EXPECT_TRUE(cache.Insert(object.Get()).second);
+ std::atomic_flag eqOnceFlag;
+ object->SetEqualFn([&](const CacheableT* x) {
+ if (!eqOnceFlag.test_and_set()) {
+ semA.Release();
+ semB.Acquire();
+ }
+ });
+
// Thread A will release the last reference of the original object after the object has been
// promoted internally for equality check.
auto threadA = [&] {
@@ -338,14 +346,18 @@
ContentLessObjectCache<CacheableT> cache;
Ref<CacheableT> object1 = AcquireRef(new CacheableT(1));
- object1->SetEqualFn([&](const CacheableT* x) {
- semA.Release();
- semB.Acquire();
- });
object1->SetDeleteFn([&](CacheableT* x) { cache.Erase(x); });
EXPECT_TRUE(cache.Insert(object1.Get()).second);
CacheableT* object1Ptr = object1.Get();
+ std::atomic_flag eqOnceFlag;
+ object1->SetEqualFn([&](const CacheableT* x) {
+ if (!eqOnceFlag.test_and_set()) {
+ semA.Release();
+ semB.Acquire();
+ }
+ });
+
// Thread A will release the last reference of the original object after the object has been
// promoted internally for equality check.
auto threadA = [&] {
@@ -375,13 +387,17 @@
ContentLessObjectCache<CacheableT> cache;
Ref<CacheableT> object1 = AcquireRef(new CacheableT(1, 1));
- object1->SetEqualFn([&](const CacheableT* x) {
- semA.Release();
- semB.Acquire();
- });
object1->SetDeleteFn([&](CacheableT* x) { cache.Erase(x); });
EXPECT_TRUE(cache.Insert(object1.Get()).second);
+ std::atomic_flag eqOnceFlag;
+ object1->SetEqualFn([&](const CacheableT* x) {
+ if (!eqOnceFlag.test_and_set()) {
+ semA.Release();
+ semB.Acquire();
+ }
+ });
+
// Thread A will release the last reference of the original object after the object has been
// promoted internally for equality check.
auto threadA = [&] {