Consistent formatting for Dawn/Tint.

This CL updates the clang-format files to have a single shared format
between Dawn and Tint. The major changes are: indentation is 4 spaces,
lines are limited to 100 columns, and namespaces are not indented.

Bug: dawn:1339
Change-Id: I4208742c95643998d9fd14e77a9cc558071ded39
Reviewed-on: https://dawn-review.googlesource.com/c/dawn/+/87603
Commit-Queue: Dan Sinclair <dsinclair@chromium.org>
Reviewed-by: Corentin Wallez <cwallez@chromium.org>
Kokoro: Kokoro <noreply+kokoro@google.com>
diff --git a/src/dawn/tests/DawnNativeTest.cpp b/src/dawn/tests/DawnNativeTest.cpp
index 02919ea..c07c5c2 100644
--- a/src/dawn/tests/DawnNativeTest.cpp
+++ b/src/dawn/tests/DawnNativeTest.cpp
@@ -23,13 +23,12 @@
 
 namespace dawn::native {
 
-    void AddFatalDawnFailure(const char* expression, const ErrorData* error) {
-        const auto& backtrace = error->GetBacktrace();
-        GTEST_MESSAGE_AT_(
-            backtrace.at(0).file, backtrace.at(0).line,
-            absl::StrCat(expression, " returned error: ", error->GetMessage()).c_str(),
-            ::testing::TestPartResult::kFatalFailure);
-    }
+void AddFatalDawnFailure(const char* expression, const ErrorData* error) {
+    const auto& backtrace = error->GetBacktrace();
+    GTEST_MESSAGE_AT_(backtrace.at(0).file, backtrace.at(0).line,
+                      absl::StrCat(expression, " returned error: ", error->GetMessage()).c_str(),
+                      ::testing::TestPartResult::kFatalFailure);
+}
 
 }  // namespace dawn::native
 
@@ -67,8 +66,7 @@
     device.SetUncapturedErrorCallback(DawnNativeTest::OnDeviceError, nullptr);
 }
 
-void DawnNativeTest::TearDown() {
-}
+void DawnNativeTest::TearDown() {}
 
 WGPUDevice DawnNativeTest::CreateTestDevice() {
     // Disabled disallowing unsafe APIs so we can test them.
diff --git a/src/dawn/tests/DawnNativeTest.h b/src/dawn/tests/DawnNativeTest.h
index 53b6ab8..e92bf67 100644
--- a/src/dawn/tests/DawnNativeTest.h
+++ b/src/dawn/tests/DawnNativeTest.h
@@ -24,11 +24,11 @@
 
 namespace dawn::native {
 
-    // This is similar to DAWN_TRY_ASSIGN but produces a fatal GTest error if EXPR is an error.
+// This is similar to DAWN_TRY_ASSIGN but produces a fatal GTest error if EXPR is an error.
 #define DAWN_ASSERT_AND_ASSIGN(VAR, EXPR) \
     DAWN_TRY_ASSIGN_WITH_CLEANUP(VAR, EXPR, {}, AddFatalDawnFailure(#EXPR, error.get()))
 
-    void AddFatalDawnFailure(const char* expression, const ErrorData* error);
+void AddFatalDawnFailure(const char* expression, const ErrorData* error);
 
 }  // namespace dawn::native
 
diff --git a/src/dawn/tests/DawnTest.cpp b/src/dawn/tests/DawnTest.cpp
index b4e0f4a..816dbee 100644
--- a/src/dawn/tests/DawnTest.cpp
+++ b/src/dawn/tests/DawnTest.cpp
@@ -44,67 +44,67 @@
 #include "dawn/wire/WireServer.h"
 
 #if defined(DAWN_ENABLE_BACKEND_OPENGL)
-#    include "GLFW/glfw3.h"
-#    include "dawn/native/OpenGLBackend.h"
+#include "GLFW/glfw3.h"
+#include "dawn/native/OpenGLBackend.h"
 #endif  // DAWN_ENABLE_BACKEND_OPENGL
 
 namespace {
 
-    std::string ParamName(wgpu::BackendType type) {
-        switch (type) {
-            case wgpu::BackendType::D3D12:
-                return "D3D12";
-            case wgpu::BackendType::Metal:
-                return "Metal";
-            case wgpu::BackendType::Null:
-                return "Null";
-            case wgpu::BackendType::OpenGL:
-                return "OpenGL";
-            case wgpu::BackendType::OpenGLES:
-                return "OpenGLES";
-            case wgpu::BackendType::Vulkan:
-                return "Vulkan";
-            default:
-                UNREACHABLE();
+std::string ParamName(wgpu::BackendType type) {
+    switch (type) {
+        case wgpu::BackendType::D3D12:
+            return "D3D12";
+        case wgpu::BackendType::Metal:
+            return "Metal";
+        case wgpu::BackendType::Null:
+            return "Null";
+        case wgpu::BackendType::OpenGL:
+            return "OpenGL";
+        case wgpu::BackendType::OpenGLES:
+            return "OpenGLES";
+        case wgpu::BackendType::Vulkan:
+            return "Vulkan";
+        default:
+            UNREACHABLE();
+    }
+}
+
+const char* AdapterTypeName(wgpu::AdapterType type) {
+    switch (type) {
+        case wgpu::AdapterType::DiscreteGPU:
+            return "Discrete GPU";
+        case wgpu::AdapterType::IntegratedGPU:
+            return "Integrated GPU";
+        case wgpu::AdapterType::CPU:
+            return "CPU";
+        case wgpu::AdapterType::Unknown:
+            return "Unknown";
+        default:
+            UNREACHABLE();
+    }
+}
+
+struct MapReadUserdata {
+    DawnTestBase* test;
+    size_t slot;
+};
+
+DawnTestEnvironment* gTestEnv = nullptr;
+
+template <typename T>
+void printBuffer(testing::AssertionResult& result, const T* buffer, const size_t count) {
+    static constexpr unsigned int kBytes = sizeof(T);
+
+    for (size_t index = 0; index < count; ++index) {
+        auto byteView = reinterpret_cast<const uint8_t*>(buffer + index);
+        for (unsigned int b = 0; b < kBytes; ++b) {
+            char buf[4];
+            snprintf(buf, sizeof(buf), "%02X ", byteView[b]);
+            result << buf;
         }
     }
-
-    const char* AdapterTypeName(wgpu::AdapterType type) {
-        switch (type) {
-            case wgpu::AdapterType::DiscreteGPU:
-                return "Discrete GPU";
-            case wgpu::AdapterType::IntegratedGPU:
-                return "Integrated GPU";
-            case wgpu::AdapterType::CPU:
-                return "CPU";
-            case wgpu::AdapterType::Unknown:
-                return "Unknown";
-            default:
-                UNREACHABLE();
-        }
-    }
-
-    struct MapReadUserdata {
-        DawnTestBase* test;
-        size_t slot;
-    };
-
-    DawnTestEnvironment* gTestEnv = nullptr;
-
-    template <typename T>
-    void printBuffer(testing::AssertionResult& result, const T* buffer, const size_t count) {
-        static constexpr unsigned int kBytes = sizeof(T);
-
-        for (size_t index = 0; index < count; ++index) {
-            auto byteView = reinterpret_cast<const uint8_t*>(buffer + index);
-            for (unsigned int b = 0; b < kBytes; ++b) {
-                char buf[4];
-                snprintf(buf, sizeof(buf), "%02X ", byteView[b]);
-                result << buf;
-            }
-        }
-        result << std::endl;
-    }
+    result << std::endl;
+}
 
 }  // anonymous namespace
 
@@ -121,8 +121,7 @@
                                      std::initializer_list<const char*> forceDisabledWorkarounds)
     : backendType(backendType),
       forceEnabledWorkarounds(forceEnabledWorkarounds),
-      forceDisabledWorkarounds(forceDisabledWorkarounds) {
-}
+      forceDisabledWorkarounds(forceDisabledWorkarounds) {}
 
 BackendTestConfig D3D12Backend(std::initializer_list<const char*> forceEnabledWorkarounds,
                                std::initializer_list<const char*> forceDisabledWorkarounds) {
@@ -162,15 +161,13 @@
 
 TestAdapterProperties::TestAdapterProperties(const wgpu::AdapterProperties& properties,
                                              bool selected)
-    : wgpu::AdapterProperties(properties), adapterName(properties.name), selected(selected) {
-}
+    : wgpu::AdapterProperties(properties), adapterName(properties.name), selected(selected) {}
 
 AdapterTestParam::AdapterTestParam(const BackendTestConfig& config,
                                    const TestAdapterProperties& adapterProperties)
     : adapterProperties(adapterProperties),
       forceEnabledWorkarounds(config.forceEnabledWorkarounds),
-      forceDisabledWorkarounds(config.forceDisabledWorkarounds) {
-}
+      forceDisabledWorkarounds(config.forceDisabledWorkarounds) {}
 
 std::ostream& operator<<(std::ostream& os, const AdapterTestParam& param) {
     os << ParamName(param.adapterProperties.backendType) << " "
@@ -193,8 +190,7 @@
     return os;
 }
 
-DawnTestBase::PrintToStringParamName::PrintToStringParamName(const char* test) : mTest(test) {
-}
+DawnTestBase::PrintToStringParamName::PrintToStringParamName(const char* test) : mTest(test) {}
 
 std::string DawnTestBase::PrintToStringParamName::SanitizeParamName(std::string paramName,
                                                                     size_t index) const {
@@ -700,8 +696,7 @@
 
 DawnTestBase::DawnTestBase(const AdapterTestParam& param)
     : mParam(param),
-      mWireHelper(utils::CreateWireHelper(gTestEnv->UsesWire(), gTestEnv->GetWireTraceDir())) {
-}
+      mWireHelper(utils::CreateWireHelper(gTestEnv->UsesWire(), gTestEnv->GetWireTraceDir())) {}
 
 DawnTestBase::~DawnTestBase() {
     // We need to destroy child objects before the Device
@@ -1587,154 +1582,153 @@
 }
 
 namespace detail {
-    std::vector<AdapterTestParam> GetAvailableAdapterTestParamsForBackends(
-        const BackendTestConfig* params,
-        size_t numParams) {
-        ASSERT(gTestEnv != nullptr);
-        return gTestEnv->GetAvailableAdapterTestParamsForBackends(params, numParams);
+std::vector<AdapterTestParam> GetAvailableAdapterTestParamsForBackends(
+    const BackendTestConfig* params,
+    size_t numParams) {
+    ASSERT(gTestEnv != nullptr);
+    return gTestEnv->GetAvailableAdapterTestParamsForBackends(params, numParams);
+}
+
+// Helper classes to set expectations
+
+template <typename T, typename U>
+ExpectEq<T, U>::ExpectEq(T singleValue, T tolerance) : mTolerance(tolerance) {
+    mExpected.push_back(singleValue);
+}
+
+template <typename T, typename U>
+ExpectEq<T, U>::ExpectEq(const T* values, const unsigned int count, T tolerance)
+    : mTolerance(tolerance) {
+    mExpected.assign(values, values + count);
+}
+
+namespace {
+
+template <typename T, typename U = T>
+testing::AssertionResult CheckImpl(const T& expected, const U& actual, const T& tolerance) {
+    ASSERT(tolerance == T{});
+    if (expected != actual) {
+        return testing::AssertionFailure() << expected << ", actual " << actual;
     }
+    return testing::AssertionSuccess();
+}
 
-    // Helper classes to set expectations
-
-    template <typename T, typename U>
-    ExpectEq<T, U>::ExpectEq(T singleValue, T tolerance) : mTolerance(tolerance) {
-        mExpected.push_back(singleValue);
+template <>
+testing::AssertionResult CheckImpl<float>(const float& expected,
+                                          const float& actual,
+                                          const float& tolerance) {
+    if (abs(expected - actual) > tolerance) {
+        return tolerance == 0.0 ? testing::AssertionFailure() << expected << ", actual " << actual
+                                : testing::AssertionFailure() << "within " << tolerance << " of "
+                                                              << expected << ", actual " << actual;
     }
+    return testing::AssertionSuccess();
+}
 
-    template <typename T, typename U>
-    ExpectEq<T, U>::ExpectEq(const T* values, const unsigned int count, T tolerance)
-        : mTolerance(tolerance) {
-        mExpected.assign(values, values + count);
+// Interpret uint16_t as float16
+// This is mostly for reading float16 output from textures
+template <>
+testing::AssertionResult CheckImpl<float, uint16_t>(const float& expected,
+                                                    const uint16_t& actual,
+                                                    const float& tolerance) {
+    float actualF32 = Float16ToFloat32(actual);
+    if (abs(expected - actualF32) > tolerance) {
+        return tolerance == 0.0
+                   ? testing::AssertionFailure() << expected << ", actual " << actualF32
+                   : testing::AssertionFailure() << "within " << tolerance << " of " << expected
+                                                 << ", actual " << actualF32;
     }
+    return testing::AssertionSuccess();
+}
 
-    namespace {
+}  // namespace
 
-        template <typename T, typename U = T>
-        testing::AssertionResult CheckImpl(const T& expected, const U& actual, const T& tolerance) {
-            ASSERT(tolerance == T{});
-            if (expected != actual) {
-                return testing::AssertionFailure() << expected << ", actual " << actual;
+template <typename T, typename U>
+testing::AssertionResult ExpectEq<T, U>::Check(const void* data, size_t size) {
+    DAWN_ASSERT(size == sizeof(U) * mExpected.size());
+    const U* actual = static_cast<const U*>(data);
+
+    for (size_t i = 0; i < mExpected.size(); ++i) {
+        testing::AssertionResult check = CheckImpl(mExpected[i], actual[i], mTolerance);
+        if (!check) {
+            testing::AssertionResult result = testing::AssertionFailure()
+                                              << "Expected data[" << i << "] to be "
+                                              << check.message() << std::endl;
+
+            if (mExpected.size() <= 1024) {
+                result << "Expected:" << std::endl;
+                printBuffer(result, mExpected.data(), mExpected.size());
+
+                result << "Actual:" << std::endl;
+                printBuffer(result, actual, mExpected.size());
             }
-            return testing::AssertionSuccess();
-        }
 
-        template <>
-        testing::AssertionResult CheckImpl<float>(const float& expected,
-                                                  const float& actual,
-                                                  const float& tolerance) {
-            if (abs(expected - actual) > tolerance) {
-                return tolerance == 0.0
-                           ? testing::AssertionFailure() << expected << ", actual " << actual
-                           : testing::AssertionFailure() << "within " << tolerance << " of "
-                                                         << expected << ", actual " << actual;
+            return result;
+        }
+    }
+    return testing::AssertionSuccess();
+}
+
+template class ExpectEq<uint8_t>;
+template class ExpectEq<uint16_t>;
+template class ExpectEq<uint32_t>;
+template class ExpectEq<uint64_t>;
+template class ExpectEq<RGBA8>;
+template class ExpectEq<float>;
+template class ExpectEq<float, uint16_t>;
+
+template <typename T>
+ExpectBetweenColors<T>::ExpectBetweenColors(T value0, T value1) {
+    T l, h;
+    l.r = std::min(value0.r, value1.r);
+    l.g = std::min(value0.g, value1.g);
+    l.b = std::min(value0.b, value1.b);
+    l.a = std::min(value0.a, value1.a);
+
+    h.r = std::max(value0.r, value1.r);
+    h.g = std::max(value0.g, value1.g);
+    h.b = std::max(value0.b, value1.b);
+    h.a = std::max(value0.a, value1.a);
+
+    mLowerColorChannels.push_back(l);
+    mHigherColorChannels.push_back(h);
+
+    mValues0.push_back(value0);
+    mValues1.push_back(value1);
+}
+
+template <typename T>
+testing::AssertionResult ExpectBetweenColors<T>::Check(const void* data, size_t size) {
+    DAWN_ASSERT(size == sizeof(T) * mLowerColorChannels.size());
+    DAWN_ASSERT(mHigherColorChannels.size() == mLowerColorChannels.size());
+    DAWN_ASSERT(mValues0.size() == mValues1.size());
+    DAWN_ASSERT(mValues0.size() == mLowerColorChannels.size());
+
+    const T* actual = static_cast<const T*>(data);
+
+    for (size_t i = 0; i < mLowerColorChannels.size(); ++i) {
+        if (!(actual[i] >= mLowerColorChannels[i] && actual[i] <= mHigherColorChannels[i])) {
+            testing::AssertionResult result = testing::AssertionFailure()
+                                              << "Expected data[" << i << "] to be between "
+                                              << mValues0[i] << " and " << mValues1[i]
+                                              << ", actual " << actual[i] << std::endl;
+
+            if (mLowerColorChannels.size() <= 1024) {
+                result << "Expected between:" << std::endl;
+                printBuffer(result, mValues0.data(), mLowerColorChannels.size());
+                result << "and" << std::endl;
+                printBuffer(result, mValues1.data(), mLowerColorChannels.size());
+
+                result << "Actual:" << std::endl;
+                printBuffer(result, actual, mLowerColorChannels.size());
             }
-            return testing::AssertionSuccess();
+
+            return result;
         }
-
-        // Interpret uint16_t as float16
-        // This is mostly for reading float16 output from textures
-        template <>
-        testing::AssertionResult CheckImpl<float, uint16_t>(const float& expected,
-                                                            const uint16_t& actual,
-                                                            const float& tolerance) {
-            float actualF32 = Float16ToFloat32(actual);
-            if (abs(expected - actualF32) > tolerance) {
-                return tolerance == 0.0
-                           ? testing::AssertionFailure() << expected << ", actual " << actualF32
-                           : testing::AssertionFailure() << "within " << tolerance << " of "
-                                                         << expected << ", actual " << actualF32;
-            }
-            return testing::AssertionSuccess();
-        }
-
-    }  // namespace
-
-    template <typename T, typename U>
-    testing::AssertionResult ExpectEq<T, U>::Check(const void* data, size_t size) {
-        DAWN_ASSERT(size == sizeof(U) * mExpected.size());
-        const U* actual = static_cast<const U*>(data);
-
-        for (size_t i = 0; i < mExpected.size(); ++i) {
-            testing::AssertionResult check = CheckImpl(mExpected[i], actual[i], mTolerance);
-            if (!check) {
-                testing::AssertionResult result = testing::AssertionFailure()
-                                                  << "Expected data[" << i << "] to be "
-                                                  << check.message() << std::endl;
-
-                if (mExpected.size() <= 1024) {
-                    result << "Expected:" << std::endl;
-                    printBuffer(result, mExpected.data(), mExpected.size());
-
-                    result << "Actual:" << std::endl;
-                    printBuffer(result, actual, mExpected.size());
-                }
-
-                return result;
-            }
-        }
-        return testing::AssertionSuccess();
     }
 
-    template class ExpectEq<uint8_t>;
-    template class ExpectEq<uint16_t>;
-    template class ExpectEq<uint32_t>;
-    template class ExpectEq<uint64_t>;
-    template class ExpectEq<RGBA8>;
-    template class ExpectEq<float>;
-    template class ExpectEq<float, uint16_t>;
+    return testing::AssertionSuccess();
+}
 
-    template <typename T>
-    ExpectBetweenColors<T>::ExpectBetweenColors(T value0, T value1) {
-        T l, h;
-        l.r = std::min(value0.r, value1.r);
-        l.g = std::min(value0.g, value1.g);
-        l.b = std::min(value0.b, value1.b);
-        l.a = std::min(value0.a, value1.a);
-
-        h.r = std::max(value0.r, value1.r);
-        h.g = std::max(value0.g, value1.g);
-        h.b = std::max(value0.b, value1.b);
-        h.a = std::max(value0.a, value1.a);
-
-        mLowerColorChannels.push_back(l);
-        mHigherColorChannels.push_back(h);
-
-        mValues0.push_back(value0);
-        mValues1.push_back(value1);
-    }
-
-    template <typename T>
-    testing::AssertionResult ExpectBetweenColors<T>::Check(const void* data, size_t size) {
-        DAWN_ASSERT(size == sizeof(T) * mLowerColorChannels.size());
-        DAWN_ASSERT(mHigherColorChannels.size() == mLowerColorChannels.size());
-        DAWN_ASSERT(mValues0.size() == mValues1.size());
-        DAWN_ASSERT(mValues0.size() == mLowerColorChannels.size());
-
-        const T* actual = static_cast<const T*>(data);
-
-        for (size_t i = 0; i < mLowerColorChannels.size(); ++i) {
-            if (!(actual[i] >= mLowerColorChannels[i] && actual[i] <= mHigherColorChannels[i])) {
-                testing::AssertionResult result = testing::AssertionFailure()
-                                                  << "Expected data[" << i << "] to be between "
-                                                  << mValues0[i] << " and " << mValues1[i]
-                                                  << ", actual " << actual[i] << std::endl;
-
-                if (mLowerColorChannels.size() <= 1024) {
-                    result << "Expected between:" << std::endl;
-                    printBuffer(result, mValues0.data(), mLowerColorChannels.size());
-                    result << "and" << std::endl;
-                    printBuffer(result, mValues1.data(), mLowerColorChannels.size());
-
-                    result << "Actual:" << std::endl;
-                    printBuffer(result, actual, mLowerColorChannels.size());
-                }
-
-                return result;
-            }
-        }
-
-        return testing::AssertionSuccess();
-    }
-
-    template class ExpectBetweenColors<RGBA8>;
+template class ExpectBetweenColors<RGBA8>;
 }  // namespace detail
diff --git a/src/dawn/tests/DawnTest.h b/src/dawn/tests/DawnTest.h
index d389b59..bcd83e4 100644
--- a/src/dawn/tests/DawnTest.h
+++ b/src/dawn/tests/DawnTest.h
@@ -120,10 +120,8 @@
 #define ASSERT_DEVICE_ERROR(statement) ASSERT_DEVICE_ERROR_MSG(statement, testing::_)
 
 struct RGBA8 {
-    constexpr RGBA8() : RGBA8(0, 0, 0, 0) {
-    }
-    constexpr RGBA8(uint8_t r, uint8_t g, uint8_t b, uint8_t a) : r(r), g(g), b(b), a(a) {
-    }
+    constexpr RGBA8() : RGBA8(0, 0, 0, 0) {}
+    constexpr RGBA8(uint8_t r, uint8_t g, uint8_t b, uint8_t a) : r(r), g(g), b(b), a(a) {}
     bool operator==(const RGBA8& other) const;
     bool operator!=(const RGBA8& other) const;
     bool operator<=(const RGBA8& other) const;
@@ -194,25 +192,25 @@
 struct GLFWwindow;
 
 namespace utils {
-    class PlatformDebugLogger;
-    class TerribleCommandBuffer;
-    class WireHelper;
+class PlatformDebugLogger;
+class TerribleCommandBuffer;
+class WireHelper;
 }  // namespace utils
 
 namespace detail {
-    class Expectation;
-    class CustomTextureExpectation;
+class Expectation;
+class CustomTextureExpectation;
 
-    template <typename T, typename U = T>
-    class ExpectEq;
-    template <typename T>
-    class ExpectBetweenColors;
+template <typename T, typename U = T>
+class ExpectEq;
+template <typename T>
+class ExpectBetweenColors;
 }  // namespace detail
 
 namespace dawn::wire {
-    class CommandHandler;
-    class WireClient;
-    class WireServer;
+class CommandHandler;
+class WireClient;
+class WireServer;
 }  // namespace dawn::wire
 
 void InitDawnEnd2EndTestEnvironment(int argc, char** argv);
@@ -655,18 +653,13 @@
     DawnTestWithParams();
     ~DawnTestWithParams() override = default;
 
-    void SetUp() override {
-        DawnTestBase::SetUp();
-    }
+    void SetUp() override { DawnTestBase::SetUp(); }
 
-    void TearDown() override {
-        DawnTestBase::TearDown();
-    }
+    void TearDown() override { DawnTestBase::TearDown(); }
 };
 
 template <typename Params>
-DawnTestWithParams<Params>::DawnTestWithParams() : DawnTestBase(this->GetParam()) {
-}
+DawnTestWithParams<Params>::DawnTestWithParams() : DawnTestBase(this->GetParam()) {}
 
 using DawnTest = DawnTestWithParams<>;
 
@@ -727,8 +720,7 @@
         template <typename... Args>                                                                \
         StructName(const AdapterTestParam& param, Args&&... args)                                  \
             : AdapterTestParam(param),                                                             \
-              DAWN_PP_CONCATENATE(_Dawn_, StructName){std::forward<Args>(args)...} {               \
-        }                                                                                          \
+              DAWN_PP_CONCATENATE(_Dawn_, StructName){std::forward<Args>(args)...} {}              \
     };                                                                                             \
     std::ostream& operator<<(std::ostream& o, const StructName& param) {                           \
         o << static_cast<const AdapterTestParam&>(param);                                          \
@@ -738,69 +730,69 @@
     static_assert(true, "require semicolon")
 
 namespace detail {
-    // Helper functions used for DAWN_INSTANTIATE_TEST
-    std::vector<AdapterTestParam> GetAvailableAdapterTestParamsForBackends(
-        const BackendTestConfig* params,
-        size_t numParams);
+// Helper functions used for DAWN_INSTANTIATE_TEST
+std::vector<AdapterTestParam> GetAvailableAdapterTestParamsForBackends(
+    const BackendTestConfig* params,
+    size_t numParams);
 
-    // All classes used to implement the deferred expectations should inherit from this.
-    class Expectation {
-      public:
-        virtual ~Expectation() = default;
+// All classes used to implement the deferred expectations should inherit from this.
+class Expectation {
+  public:
+    virtual ~Expectation() = default;
 
-        // Will be called with the buffer or texture data the expectation should check.
-        virtual testing::AssertionResult Check(const void* data, size_t size) = 0;
-    };
+    // Will be called with the buffer or texture data the expectation should check.
+    virtual testing::AssertionResult Check(const void* data, size_t size) = 0;
+};
 
-    // Expectation that checks the data is equal to some expected values.
-    // T - expected value Type
-    // U - actual value Type (defaults = T)
-    // This is expanded for float16 mostly where T=float, U=uint16_t
-    template <typename T, typename U>
-    class ExpectEq : public Expectation {
-      public:
-        explicit ExpectEq(T singleValue, T tolerance = {});
-        ExpectEq(const T* values, const unsigned int count, T tolerance = {});
+// Expectation that checks the data is equal to some expected values.
+// T - expected value Type
+// U - actual value Type (defaults = T)
+// This is expanded for float16 mostly where T=float, U=uint16_t
+template <typename T, typename U>
+class ExpectEq : public Expectation {
+  public:
+    explicit ExpectEq(T singleValue, T tolerance = {});
+    ExpectEq(const T* values, const unsigned int count, T tolerance = {});
 
-        testing::AssertionResult Check(const void* data, size_t size) override;
+    testing::AssertionResult Check(const void* data, size_t size) override;
 
-      private:
-        std::vector<T> mExpected;
-        T mTolerance;
-    };
-    extern template class ExpectEq<uint8_t>;
-    extern template class ExpectEq<int16_t>;
-    extern template class ExpectEq<uint32_t>;
-    extern template class ExpectEq<uint64_t>;
-    extern template class ExpectEq<RGBA8>;
-    extern template class ExpectEq<float>;
-    extern template class ExpectEq<float, uint16_t>;
+  private:
+    std::vector<T> mExpected;
+    T mTolerance;
+};
+extern template class ExpectEq<uint8_t>;
+extern template class ExpectEq<int16_t>;
+extern template class ExpectEq<uint32_t>;
+extern template class ExpectEq<uint64_t>;
+extern template class ExpectEq<RGBA8>;
+extern template class ExpectEq<float>;
+extern template class ExpectEq<float, uint16_t>;
 
-    template <typename T>
-    class ExpectBetweenColors : public Expectation {
-      public:
-        // Inclusive for now
-        ExpectBetweenColors(T value0, T value1);
-        testing::AssertionResult Check(const void* data, size_t size) override;
+template <typename T>
+class ExpectBetweenColors : public Expectation {
+  public:
+    // Inclusive for now
+    ExpectBetweenColors(T value0, T value1);
+    testing::AssertionResult Check(const void* data, size_t size) override;
 
-      private:
-        std::vector<T> mLowerColorChannels;
-        std::vector<T> mHigherColorChannels;
+  private:
+    std::vector<T> mLowerColorChannels;
+    std::vector<T> mHigherColorChannels;
 
-        // used for printing error
-        std::vector<T> mValues0;
-        std::vector<T> mValues1;
-    };
-    // A color is considered between color0 and color1 when all channel values are within range of
-    // each counterparts. It doesn't matter which value is higher or lower. Essentially color =
-    // lerp(color0, color1, t) where t is [0,1]. But I don't want to be too strict here.
-    extern template class ExpectBetweenColors<RGBA8>;
+    // used for printing error
+    std::vector<T> mValues0;
+    std::vector<T> mValues1;
+};
+// A color is considered between color0 and color1 when all channel values are within range of
+// their counterparts. It doesn't matter which value is higher or lower. Essentially color =
+// lerp(color0, color1, t) where t is [0,1]. But I don't want to be too strict here.
+extern template class ExpectBetweenColors<RGBA8>;
 
-    class CustomTextureExpectation : public Expectation {
-      public:
-        virtual ~CustomTextureExpectation() = default;
-        virtual uint32_t DataSize() = 0;
-    };
+class CustomTextureExpectation : public Expectation {
+  public:
+    virtual ~CustomTextureExpectation() = default;
+    virtual uint32_t DataSize() = 0;
+};
 
 }  // namespace detail
 
diff --git a/src/dawn/tests/MockCallback.h b/src/dawn/tests/MockCallback.h
index 49840a1..876abc2 100644
--- a/src/dawn/tests/MockCallback.h
+++ b/src/dawn/tests/MockCallback.h
@@ -20,88 +20,86 @@
 #include <tuple>
 #include <utility>
 
-#include "gmock/gmock.h"
 #include "dawn/common/Assert.h"
+#include "gmock/gmock.h"
 
 namespace testing {
 
-    template <typename F>
-    class MockCallback;
+template <typename F>
+class MockCallback;
 
-    // Helper class for mocking callbacks used for Dawn callbacks with |void* userdata|
-    // as the last callback argument.
-    //
-    // Example Usage:
-    //   MockCallback<WGPUDeviceLostCallback> mock;
-    //
-    //   void* foo = XYZ; // this is the callback userdata
-    //
-    //   wgpuDeviceSetDeviceLostCallback(device, mock.Callback(), mock.MakeUserdata(foo));
-    //   EXPECT_CALL(mock, Call(_, foo));
-    template <typename R, typename... Args>
-    class MockCallback<R (*)(Args...)> : public ::testing::MockFunction<R(Args...)> {
-        using CallbackType = R (*)(Args...);
+// Helper class for mocking callbacks used for Dawn callbacks with |void* userdata|
+// as the last callback argument.
+//
+// Example Usage:
+//   MockCallback<WGPUDeviceLostCallback> mock;
+//
+//   void* foo = XYZ; // this is the callback userdata
+//
+//   wgpuDeviceSetDeviceLostCallback(device, mock.Callback(), mock.MakeUserdata(foo));
+//   EXPECT_CALL(mock, Call(_, foo));
+template <typename R, typename... Args>
+class MockCallback<R (*)(Args...)> : public ::testing::MockFunction<R(Args...)> {
+    using CallbackType = R (*)(Args...);
 
-      public:
-        // Helper function makes it easier to get the callback using |foo.Callback()|
-        // unstead of MockCallback<CallbackType>::Callback.
-        static CallbackType Callback() {
-            return CallUnboundCallback;
-        }
+  public:
+    // Helper function makes it easier to get the callback using |foo.Callback()|
+    // instead of MockCallback<CallbackType>::Callback.
+    static CallbackType Callback() { return CallUnboundCallback; }
 
-        void* MakeUserdata(void* userdata) {
-            auto mockAndUserdata =
-                std::unique_ptr<MockAndUserdata>(new MockAndUserdata{this, userdata});
+    void* MakeUserdata(void* userdata) {
+        auto mockAndUserdata =
+            std::unique_ptr<MockAndUserdata>(new MockAndUserdata{this, userdata});
 
-            // Add the userdata to a set of userdata for this mock. We never
-            // remove from this set even if a callback should only be called once so that
-            // repeated calls to the callback still forward the userdata correctly.
-            // Userdata will be destroyed when the mock is destroyed.
-            auto [result, inserted] = mUserdatas.insert(std::move(mockAndUserdata));
-            ASSERT(inserted);
-            return result->get();
-        }
+        // Add the userdata to a set of userdata for this mock. We never
+        // remove from this set even if a callback should only be called once so that
+        // repeated calls to the callback still forward the userdata correctly.
+        // Userdata will be destroyed when the mock is destroyed.
+        auto [result, inserted] = mUserdatas.insert(std::move(mockAndUserdata));
+        ASSERT(inserted);
+        return result->get();
+    }
 
-      private:
-        struct MockAndUserdata {
-            MockCallback* mock;
-            void* userdata;
-        };
-
-        static R CallUnboundCallback(Args... args) {
-            std::tuple<Args...> tuple = std::make_tuple(args...);
-
-            constexpr size_t ArgC = sizeof...(Args);
-            static_assert(ArgC >= 1, "Mock callback requires at least one argument (the userdata)");
-
-            // Get the userdata. It should be the last argument.
-            auto userdata = std::get<ArgC - 1>(tuple);
-            static_assert(std::is_same<decltype(userdata), void*>::value,
-                          "Last callback argument must be void* userdata");
-
-            // Extract the mock.
-            ASSERT(userdata != nullptr);
-            auto* mockAndUserdata = reinterpret_cast<MockAndUserdata*>(userdata);
-            MockCallback* mock = mockAndUserdata->mock;
-            ASSERT(mock != nullptr);
-
-            // Replace the userdata
-            std::get<ArgC - 1>(tuple) = mockAndUserdata->userdata;
-
-            // Forward the callback to the mock.
-            return mock->CallImpl(std::make_index_sequence<ArgC>{}, std::move(tuple));
-        }
-
-        // This helper cannot be inlined because we dependent on the templated index sequence
-        // to unpack the tuple arguments.
-        template <size_t... Is>
-        R CallImpl(const std::index_sequence<Is...>&, std::tuple<Args...> args) {
-            return this->Call(std::get<Is>(args)...);
-        }
-
-        std::set<std::unique_ptr<MockAndUserdata>> mUserdatas;
+  private:
+    struct MockAndUserdata {
+        MockCallback* mock;
+        void* userdata;
     };
 
+    static R CallUnboundCallback(Args... args) {
+        std::tuple<Args...> tuple = std::make_tuple(args...);
+
+        constexpr size_t ArgC = sizeof...(Args);
+        static_assert(ArgC >= 1, "Mock callback requires at least one argument (the userdata)");
+
+        // Get the userdata. It should be the last argument.
+        auto userdata = std::get<ArgC - 1>(tuple);
+        static_assert(std::is_same<decltype(userdata), void*>::value,
+                      "Last callback argument must be void* userdata");
+
+        // Extract the mock.
+        ASSERT(userdata != nullptr);
+        auto* mockAndUserdata = reinterpret_cast<MockAndUserdata*>(userdata);
+        MockCallback* mock = mockAndUserdata->mock;
+        ASSERT(mock != nullptr);
+
+        // Replace the userdata
+        std::get<ArgC - 1>(tuple) = mockAndUserdata->userdata;
+
+        // Forward the callback to the mock.
+        return mock->CallImpl(std::make_index_sequence<ArgC>{}, std::move(tuple));
+    }
+
+    // This helper cannot be inlined because we depend on the templated index sequence
+    // to unpack the tuple arguments.
+    template <size_t... Is>
+    R CallImpl(const std::index_sequence<Is...>&, std::tuple<Args...> args) {
+        return this->Call(std::get<Is>(args)...);
+    }
+
+    std::set<std::unique_ptr<MockAndUserdata>> mUserdatas;
+};
+
 }  // namespace testing
 
 #endif  // SRC_DAWN_TESTS_MOCKCALLBACK_H_
diff --git a/src/dawn/tests/ParamGenerator.h b/src/dawn/tests/ParamGenerator.h
index 8a3edba..fd06c71 100644
--- a/src/dawn/tests/ParamGenerator.h
+++ b/src/dawn/tests/ParamGenerator.h
@@ -76,20 +76,15 @@
             return mEnd == other.mEnd && mIndex == other.mIndex;
         }
 
-        bool operator!=(const Iterator& other) const {
-            return !(*this == other);
-        }
+        bool operator!=(const Iterator& other) const { return !(*this == other); }
 
-        ParamStruct operator*() const {
-            return GetParam(mParams, mIndex, s_indexSequence);
-        }
+        ParamStruct operator*() const { return GetParam(mParams, mIndex, s_indexSequence); }
 
       private:
         friend class ParamGenerator;
 
         Iterator(ParamTuple params, Index index)
-            : mParams(params), mIndex(index), mLastIndex{GetLastIndex(params, s_indexSequence)} {
-        }
+            : mParams(params), mIndex(index), mLastIndex{GetLastIndex(params, s_indexSequence)} {}
 
         ParamTuple mParams;
         Index mIndex;
@@ -119,9 +114,9 @@
 struct AdapterTestParam;
 
 namespace detail {
-    std::vector<AdapterTestParam> GetAvailableAdapterTestParamsForBackends(
-        const BackendTestConfig* params,
-        size_t numParams);
+std::vector<AdapterTestParam> GetAvailableAdapterTestParamsForBackends(
+    const BackendTestConfig* params,
+    size_t numParams);
 }
 
 template <typename Param, typename... Params>
diff --git a/src/dawn/tests/end2end/AdapterDiscoveryTests.cpp b/src/dawn/tests/end2end/AdapterDiscoveryTests.cpp
index f283803..6e85b6a 100644
--- a/src/dawn/tests/end2end/AdapterDiscoveryTests.cpp
+++ b/src/dawn/tests/end2end/AdapterDiscoveryTests.cpp
@@ -25,76 +25,105 @@
 #include "dawn/webgpu_cpp.h"
 
 #if defined(DAWN_ENABLE_BACKEND_VULKAN)
-#    include "dawn/native/VulkanBackend.h"
+#include "dawn/native/VulkanBackend.h"
 #endif  // defined(DAWN_ENABLE_BACKEND_VULKAN)
 
 #if defined(DAWN_ENABLE_BACKEND_D3D12)
-#    include "dawn/native/D3D12Backend.h"
+#include "dawn/native/D3D12Backend.h"
 #endif  // defined(DAWN_ENABLE_BACKEND_D3D12)
 
 #if defined(DAWN_ENABLE_BACKEND_METAL)
-#    include "dawn/native/MetalBackend.h"
+#include "dawn/native/MetalBackend.h"
 #endif  // defined(DAWN_ENABLE_BACKEND_METAL)
 
 #if defined(DAWN_ENABLE_BACKEND_DESKTOP_GL) || defined(DAWN_ENABLE_BACKEND_OPENGLES)
-#    include "GLFW/glfw3.h"
-#    include "dawn/native/OpenGLBackend.h"
+#include "GLFW/glfw3.h"
+#include "dawn/native/OpenGLBackend.h"
 #endif  // defined(DAWN_ENABLE_BACKEND_DESKTOP_GL) || defined(DAWN_ENABLE_BACKEND_OPENGLES)
 
 #include <gtest/gtest.h>
 
 namespace {
 
-    using testing::_;
-    using testing::MockCallback;
-    using testing::SaveArg;
+using testing::_;
+using testing::MockCallback;
+using testing::SaveArg;
 
-    class AdapterDiscoveryTests : public ::testing::Test {};
+class AdapterDiscoveryTests : public ::testing::Test {};
 
 #if defined(DAWN_ENABLE_BACKEND_VULKAN)
-    // Test only discovering the SwiftShader adapter
-    TEST(AdapterDiscoveryTests, OnlySwiftShader) {
-        dawn::native::Instance instance;
+// Test only discovering the SwiftShader adapter
+TEST(AdapterDiscoveryTests, OnlySwiftShader) {
+    dawn::native::Instance instance;
 
-        dawn::native::vulkan::AdapterDiscoveryOptions options;
-        options.forceSwiftShader = true;
-        instance.DiscoverAdapters(&options);
+    dawn::native::vulkan::AdapterDiscoveryOptions options;
+    options.forceSwiftShader = true;
+    instance.DiscoverAdapters(&options);
 
-        const auto& adapters = instance.GetAdapters();
-        EXPECT_LE(adapters.size(), 1u);  // 0 or 1 SwiftShader adapters.
-        for (const auto& adapter : adapters) {
-            wgpu::AdapterProperties properties;
-            adapter.GetProperties(&properties);
+    const auto& adapters = instance.GetAdapters();
+    EXPECT_LE(adapters.size(), 1u);  // 0 or 1 SwiftShader adapters.
+    for (const auto& adapter : adapters) {
+        wgpu::AdapterProperties properties;
+        adapter.GetProperties(&properties);
 
-            EXPECT_EQ(properties.backendType, wgpu::BackendType::Vulkan);
-            EXPECT_EQ(properties.adapterType, wgpu::AdapterType::CPU);
-            EXPECT_TRUE(gpu_info::IsSwiftshader(properties.vendorID, properties.deviceID));
-        }
+        EXPECT_EQ(properties.backendType, wgpu::BackendType::Vulkan);
+        EXPECT_EQ(properties.adapterType, wgpu::AdapterType::CPU);
+        EXPECT_TRUE(gpu_info::IsSwiftshader(properties.vendorID, properties.deviceID));
     }
+}
 
-    // Test discovering only Vulkan adapters
-    TEST(AdapterDiscoveryTests, OnlyVulkan) {
-        dawn::native::Instance instance;
+// Test discovering only Vulkan adapters
+TEST(AdapterDiscoveryTests, OnlyVulkan) {
+    dawn::native::Instance instance;
 
-        dawn::native::vulkan::AdapterDiscoveryOptions options;
-        instance.DiscoverAdapters(&options);
+    dawn::native::vulkan::AdapterDiscoveryOptions options;
+    instance.DiscoverAdapters(&options);
 
-        const auto& adapters = instance.GetAdapters();
-        for (const auto& adapter : adapters) {
-            wgpu::AdapterProperties properties;
-            adapter.GetProperties(&properties);
+    const auto& adapters = instance.GetAdapters();
+    for (const auto& adapter : adapters) {
+        wgpu::AdapterProperties properties;
+        adapter.GetProperties(&properties);
 
-            EXPECT_EQ(properties.backendType, wgpu::BackendType::Vulkan);
-        }
+        EXPECT_EQ(properties.backendType, wgpu::BackendType::Vulkan);
     }
+}
 #endif  // defined(DAWN_ENABLE_BACKEND_VULKAN)
 
 #if defined(DAWN_ENABLE_BACKEND_D3D12)
-    // Test discovering only D3D12 adapters
-    TEST(AdapterDiscoveryTests, OnlyD3D12) {
+// Test discovering only D3D12 adapters
+TEST(AdapterDiscoveryTests, OnlyD3D12) {
+    dawn::native::Instance instance;
+
+    dawn::native::d3d12::AdapterDiscoveryOptions options;
+    instance.DiscoverAdapters(&options);
+
+    const auto& adapters = instance.GetAdapters();
+    for (const auto& adapter : adapters) {
+        wgpu::AdapterProperties properties;
+        adapter.GetProperties(&properties);
+
+        EXPECT_EQ(properties.backendType, wgpu::BackendType::D3D12);
+    }
+}
+
+// Test discovering a D3D12 adapter from a preexisting DXGI adapter
+TEST(AdapterDiscoveryTests, MatchingDXGIAdapter) {
+    using Microsoft::WRL::ComPtr;
+
+    ComPtr<IDXGIFactory4> dxgiFactory;
+    HRESULT hr = ::CreateDXGIFactory2(0, IID_PPV_ARGS(&dxgiFactory));
+    ASSERT_EQ(hr, S_OK);
+
+    for (uint32_t adapterIndex = 0;; ++adapterIndex) {
+        ComPtr<IDXGIAdapter1> dxgiAdapter = nullptr;
+        if (dxgiFactory->EnumAdapters1(adapterIndex, &dxgiAdapter) == DXGI_ERROR_NOT_FOUND) {
+            break;  // No more adapters to enumerate.
+        }
+
         dawn::native::Instance instance;
 
         dawn::native::d3d12::AdapterDiscoveryOptions options;
+        options.dxgiAdapter = std::move(dxgiAdapter);
         instance.DiscoverAdapters(&options);
 
         const auto& adapters = instance.GetAdapters();
@@ -105,312 +134,280 @@
             EXPECT_EQ(properties.backendType, wgpu::BackendType::D3D12);
         }
     }
-
-    // Test discovering a D3D12 adapter from a prexisting DXGI adapter
-    TEST(AdapterDiscoveryTests, MatchingDXGIAdapter) {
-        using Microsoft::WRL::ComPtr;
-
-        ComPtr<IDXGIFactory4> dxgiFactory;
-        HRESULT hr = ::CreateDXGIFactory2(0, IID_PPV_ARGS(&dxgiFactory));
-        ASSERT_EQ(hr, S_OK);
-
-        for (uint32_t adapterIndex = 0;; ++adapterIndex) {
-            ComPtr<IDXGIAdapter1> dxgiAdapter = nullptr;
-            if (dxgiFactory->EnumAdapters1(adapterIndex, &dxgiAdapter) == DXGI_ERROR_NOT_FOUND) {
-                break;  // No more adapters to enumerate.
-            }
-
-            dawn::native::Instance instance;
-
-            dawn::native::d3d12::AdapterDiscoveryOptions options;
-            options.dxgiAdapter = std::move(dxgiAdapter);
-            instance.DiscoverAdapters(&options);
-
-            const auto& adapters = instance.GetAdapters();
-            for (const auto& adapter : adapters) {
-                wgpu::AdapterProperties properties;
-                adapter.GetProperties(&properties);
-
-                EXPECT_EQ(properties.backendType, wgpu::BackendType::D3D12);
-            }
-        }
-    }
+}
 #endif  // defined(DAWN_ENABLE_BACKEND_D3D12)
 
 #if defined(DAWN_ENABLE_BACKEND_METAL)
-    // Test discovering only Metal adapters
-    TEST(AdapterDiscoveryTests, OnlyMetal) {
-        dawn::native::Instance instance;
+// Test discovering only Metal adapters
+TEST(AdapterDiscoveryTests, OnlyMetal) {
+    dawn::native::Instance instance;
 
+    dawn::native::metal::AdapterDiscoveryOptions options;
+    instance.DiscoverAdapters(&options);
+
+    const auto& adapters = instance.GetAdapters();
+    for (const auto& adapter : adapters) {
+        wgpu::AdapterProperties properties;
+        adapter.GetProperties(&properties);
+
+        EXPECT_EQ(properties.backendType, wgpu::BackendType::Metal);
+    }
+}
+#endif  // defined(DAWN_ENABLE_BACKEND_METAL)
+
+#if defined(DAWN_ENABLE_BACKEND_DESKTOP_GL)
+// Test discovering only desktop OpenGL adapters
+TEST(AdapterDiscoveryTests, OnlyDesktopGL) {
+    if (!glfwInit()) {
+        GTEST_SKIP() << "glfwInit() failed";
+    }
+    glfwDefaultWindowHints();
+    glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 4);
+    glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 4);
+    glfwWindowHint(GLFW_OPENGL_FORWARD_COMPAT, GLFW_TRUE);
+    glfwWindowHint(GLFW_OPENGL_PROFILE, GLFW_OPENGL_CORE_PROFILE);
+    glfwWindowHint(GLFW_VISIBLE, GLFW_FALSE);
+
+    GLFWwindow* window = glfwCreateWindow(400, 400, "Dawn OpenGL test window", nullptr, nullptr);
+    glfwMakeContextCurrent(window);
+
+    dawn::native::Instance instance;
+
+    dawn::native::opengl::AdapterDiscoveryOptions options;
+    options.getProc = reinterpret_cast<void* (*)(const char*)>(glfwGetProcAddress);
+    instance.DiscoverAdapters(&options);
+    glfwWindowHint(GLFW_VISIBLE, GLFW_TRUE);
+
+    const auto& adapters = instance.GetAdapters();
+    for (const auto& adapter : adapters) {
+        wgpu::AdapterProperties properties;
+        adapter.GetProperties(&properties);
+
+        EXPECT_EQ(properties.backendType, wgpu::BackendType::OpenGL);
+    }
+
+    glfwDestroyWindow(window);
+}
+#endif  // defined(DAWN_ENABLE_BACKEND_DESKTOP_GL)
+
+#if defined(DAWN_ENABLE_BACKEND_OPENGLES)
+// Test discovering only OpenGLES adapters
+TEST(AdapterDiscoveryTests, OnlyOpenGLES) {
+    ScopedEnvironmentVar angleDefaultPlatform;
+    if (GetEnvironmentVar("ANGLE_DEFAULT_PLATFORM").first.empty()) {
+        angleDefaultPlatform.Set("ANGLE_DEFAULT_PLATFORM", "swiftshader");
+    }
+
+    if (!glfwInit()) {
+        GTEST_SKIP() << "glfwInit() failed";
+    }
+    glfwDefaultWindowHints();
+    glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 3);
+    glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 1);
+    glfwWindowHint(GLFW_CLIENT_API, GLFW_OPENGL_ES_API);
+    glfwWindowHint(GLFW_CONTEXT_CREATION_API, GLFW_EGL_CONTEXT_API);
+    glfwWindowHint(GLFW_VISIBLE, GLFW_FALSE);
+
+    GLFWwindow* window = glfwCreateWindow(400, 400, "Dawn OpenGLES test window", nullptr, nullptr);
+    glfwMakeContextCurrent(window);
+
+    dawn::native::Instance instance;
+
+    dawn::native::opengl::AdapterDiscoveryOptionsES options;
+    options.getProc = reinterpret_cast<void* (*)(const char*)>(glfwGetProcAddress);
+    instance.DiscoverAdapters(&options);
+    glfwWindowHint(GLFW_VISIBLE, GLFW_TRUE);
+
+    const auto& adapters = instance.GetAdapters();
+    for (const auto& adapter : adapters) {
+        wgpu::AdapterProperties properties;
+        adapter.GetProperties(&properties);
+
+        EXPECT_EQ(properties.backendType, wgpu::BackendType::OpenGLES);
+    }
+
+    glfwDestroyWindow(window);
+}
+#endif  // defined(DAWN_ENABLE_BACKEND_OPENGLES)
+
+#if defined(DAWN_ENABLE_BACKEND_METAL) && defined(DAWN_ENABLE_BACKEND_VULKAN)
+// Test discovering the Metal backend, then the Vulkan backend
+// does not duplicate adapters.
+TEST(AdapterDiscoveryTests, OneBackendThenTheOther) {
+    dawn::native::Instance instance;
+    uint32_t metalAdapterCount = 0;
+    {
         dawn::native::metal::AdapterDiscoveryOptions options;
         instance.DiscoverAdapters(&options);
 
         const auto& adapters = instance.GetAdapters();
+        metalAdapterCount = adapters.size();
         for (const auto& adapter : adapters) {
             wgpu::AdapterProperties properties;
             adapter.GetProperties(&properties);
 
-            EXPECT_EQ(properties.backendType, wgpu::BackendType::Metal);
+            ASSERT_EQ(properties.backendType, wgpu::BackendType::Metal);
         }
     }
-#endif  // defined(DAWN_ENABLE_BACKEND_METAL)
-
-#if defined(DAWN_ENABLE_BACKEND_DESKTOP_GL)
-    // Test discovering only desktop OpenGL adapters
-    TEST(AdapterDiscoveryTests, OnlyDesktopGL) {
-        if (!glfwInit()) {
-            GTEST_SKIP() << "glfwInit() failed";
-        }
-        glfwDefaultWindowHints();
-        glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 4);
-        glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 4);
-        glfwWindowHint(GLFW_OPENGL_FORWARD_COMPAT, GLFW_TRUE);
-        glfwWindowHint(GLFW_OPENGL_PROFILE, GLFW_OPENGL_CORE_PROFILE);
-        glfwWindowHint(GLFW_VISIBLE, GLFW_FALSE);
-
-        GLFWwindow* window =
-            glfwCreateWindow(400, 400, "Dawn OpenGL test window", nullptr, nullptr);
-        glfwMakeContextCurrent(window);
-
-        dawn::native::Instance instance;
-
-        dawn::native::opengl::AdapterDiscoveryOptions options;
-        options.getProc = reinterpret_cast<void* (*)(const char*)>(glfwGetProcAddress);
+    {
+        dawn::native::vulkan::AdapterDiscoveryOptions options;
         instance.DiscoverAdapters(&options);
-        glfwWindowHint(GLFW_VISIBLE, GLFW_TRUE);
 
+        uint32_t metalAdapterCount2 = 0;
         const auto& adapters = instance.GetAdapters();
         for (const auto& adapter : adapters) {
             wgpu::AdapterProperties properties;
             adapter.GetProperties(&properties);
 
-            EXPECT_EQ(properties.backendType, wgpu::BackendType::OpenGL);
-        }
-
-        glfwDestroyWindow(window);
-    }
-#endif  // defined(DAWN_ENABLE_BACKEND_DESKTOP_GL)
-
-#if defined(DAWN_ENABLE_BACKEND_OPENGLES)
-    // Test discovering only OpenGLES adapters
-    TEST(AdapterDiscoveryTests, OnlyOpenGLES) {
-        ScopedEnvironmentVar angleDefaultPlatform;
-        if (GetEnvironmentVar("ANGLE_DEFAULT_PLATFORM").first.empty()) {
-            angleDefaultPlatform.Set("ANGLE_DEFAULT_PLATFORM", "swiftshader");
-        }
-
-        if (!glfwInit()) {
-            GTEST_SKIP() << "glfwInit() failed";
-        }
-        glfwDefaultWindowHints();
-        glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 3);
-        glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 1);
-        glfwWindowHint(GLFW_CLIENT_API, GLFW_OPENGL_ES_API);
-        glfwWindowHint(GLFW_CONTEXT_CREATION_API, GLFW_EGL_CONTEXT_API);
-        glfwWindowHint(GLFW_VISIBLE, GLFW_FALSE);
-
-        GLFWwindow* window =
-            glfwCreateWindow(400, 400, "Dawn OpenGLES test window", nullptr, nullptr);
-        glfwMakeContextCurrent(window);
-
-        dawn::native::Instance instance;
-
-        dawn::native::opengl::AdapterDiscoveryOptionsES options;
-        options.getProc = reinterpret_cast<void* (*)(const char*)>(glfwGetProcAddress);
-        instance.DiscoverAdapters(&options);
-        glfwWindowHint(GLFW_VISIBLE, GLFW_TRUE);
-
-        const auto& adapters = instance.GetAdapters();
-        for (const auto& adapter : adapters) {
-            wgpu::AdapterProperties properties;
-            adapter.GetProperties(&properties);
-
-            EXPECT_EQ(properties.backendType, wgpu::BackendType::OpenGLES);
-        }
-
-        glfwDestroyWindow(window);
-    }
-#endif  // defined(DAWN_ENABLE_BACKEND_OPENGLES)
-
-#if defined(DAWN_ENABLE_BACKEND_METAL) && defined(DAWN_ENABLE_BACKEND_VULKAN)
-    // Test discovering the Metal backend, then the Vulkan backend
-    // does not duplicate adapters.
-    TEST(AdapterDiscoveryTests, OneBackendThenTheOther) {
-        dawn::native::Instance instance;
-        uint32_t metalAdapterCount = 0;
-        {
-            dawn::native::metal::AdapterDiscoveryOptions options;
-            instance.DiscoverAdapters(&options);
-
-            const auto& adapters = instance.GetAdapters();
-            metalAdapterCount = adapters.size();
-            for (const auto& adapter : adapters) {
-                wgpu::AdapterProperties properties;
-                adapter.GetProperties(&properties);
-
-                ASSERT_EQ(properties.backendType, wgpu::BackendType::Metal);
+            EXPECT_TRUE(properties.backendType == wgpu::BackendType::Metal ||
+                        properties.backendType == wgpu::BackendType::Vulkan);
+            if (properties.backendType == wgpu::BackendType::Metal) {
+                metalAdapterCount2++;
             }
         }
-        {
-            dawn::native::vulkan::AdapterDiscoveryOptions options;
-            instance.DiscoverAdapters(&options);
-
-            uint32_t metalAdapterCount2 = 0;
-            const auto& adapters = instance.GetAdapters();
-            for (const auto& adapter : adapters) {
-                wgpu::AdapterProperties properties;
-                adapter.GetProperties(&properties);
-
-                EXPECT_TRUE(properties.backendType == wgpu::BackendType::Metal ||
-                            properties.backendType == wgpu::BackendType::Vulkan);
-                if (properties.backendType == wgpu::BackendType::Metal) {
-                    metalAdapterCount2++;
-                }
-            }
-            EXPECT_EQ(metalAdapterCount, metalAdapterCount2);
-        }
+        EXPECT_EQ(metalAdapterCount, metalAdapterCount2);
     }
+}
 #endif  // defined(DAWN_ENABLE_BACKEND_VULKAN) && defined(DAWN_ENABLE_BACKEND_METAL)
 
-    class AdapterCreationTest : public ::testing::Test {
-      protected:
-        void SetUp() override {
-            dawnProcSetProcs(&dawn_native::GetProcs());
+class AdapterCreationTest : public ::testing::Test {
+  protected:
+    void SetUp() override {
+        dawnProcSetProcs(&dawn_native::GetProcs());
 
-            {
-                auto nativeInstance = std::make_unique<dawn_native::Instance>();
-                nativeInstance->DiscoverDefaultAdapters();
-                for (dawn_native::Adapter& nativeAdapter : nativeInstance->GetAdapters()) {
-                    anyAdapterAvailable = true;
+        {
+            auto nativeInstance = std::make_unique<dawn_native::Instance>();
+            nativeInstance->DiscoverDefaultAdapters();
+            for (dawn_native::Adapter& nativeAdapter : nativeInstance->GetAdapters()) {
+                anyAdapterAvailable = true;
 
-                    wgpu::AdapterProperties properties;
-                    nativeAdapter.GetProperties(&properties);
-                    swiftShaderAvailable =
-                        swiftShaderAvailable ||
-                        gpu_info::IsSwiftshader(properties.vendorID, properties.deviceID);
-                    discreteGPUAvailable = discreteGPUAvailable ||
-                                           properties.adapterType == wgpu::AdapterType::DiscreteGPU;
-                    integratedGPUAvailable =
-                        integratedGPUAvailable ||
-                        properties.adapterType == wgpu::AdapterType::IntegratedGPU;
-                }
+                wgpu::AdapterProperties properties;
+                nativeAdapter.GetProperties(&properties);
+                swiftShaderAvailable =
+                    swiftShaderAvailable ||
+                    gpu_info::IsSwiftshader(properties.vendorID, properties.deviceID);
+                discreteGPUAvailable = discreteGPUAvailable ||
+                                       properties.adapterType == wgpu::AdapterType::DiscreteGPU;
+                integratedGPUAvailable = integratedGPUAvailable ||
+                                         properties.adapterType == wgpu::AdapterType::IntegratedGPU;
             }
-
-            instance = wgpu::CreateInstance();
         }
 
-        void TearDown() override {
-            instance = nullptr;
-            dawnProcSetProcs(nullptr);
-        }
-
-        wgpu::Instance instance;
-        bool anyAdapterAvailable = false;
-        bool swiftShaderAvailable = false;
-        bool discreteGPUAvailable = false;
-        bool integratedGPUAvailable = false;
-    };
-
-    // Test that requesting the default adapter works
-    TEST_F(AdapterCreationTest, DefaultAdapter) {
-        wgpu::RequestAdapterOptions options = {};
-
-        MockCallback<WGPURequestAdapterCallback> cb;
-
-        WGPUAdapter cAdapter = nullptr;
-        EXPECT_CALL(cb, Call(WGPURequestAdapterStatus_Success, _, nullptr, this))
-            .WillOnce(SaveArg<1>(&cAdapter));
-        instance.RequestAdapter(&options, cb.Callback(), cb.MakeUserdata(this));
-
-        wgpu::Adapter adapter = wgpu::Adapter::Acquire(cAdapter);
-        EXPECT_EQ(adapter != nullptr, anyAdapterAvailable);
+        instance = wgpu::CreateInstance();
     }
 
-    // Test that passing nullptr for the options gets the default adapter
-    TEST_F(AdapterCreationTest, NullGivesDefaultAdapter) {
-        wgpu::RequestAdapterOptions options = {};
-
-        MockCallback<WGPURequestAdapterCallback> cb;
-
-        WGPUAdapter cAdapter = nullptr;
-        EXPECT_CALL(cb, Call(WGPURequestAdapterStatus_Success, _, nullptr, this))
-            .WillOnce(SaveArg<1>(&cAdapter));
-        instance.RequestAdapter(&options, cb.Callback(), cb.MakeUserdata(this));
-
-        wgpu::Adapter adapter = wgpu::Adapter::Acquire(cAdapter);
-        EXPECT_EQ(adapter != nullptr, anyAdapterAvailable);
-
-        EXPECT_CALL(cb, Call(WGPURequestAdapterStatus_Success, _, nullptr, this + 1))
-            .WillOnce(SaveArg<1>(&cAdapter));
-        instance.RequestAdapter(nullptr, cb.Callback(), cb.MakeUserdata(this + 1));
-
-        wgpu::Adapter adapter2 = wgpu::Adapter::Acquire(cAdapter);
-        EXPECT_EQ(adapter.Get(), adapter2.Get());
+    void TearDown() override {
+        instance = nullptr;
+        dawnProcSetProcs(nullptr);
     }
 
-    // Test that requesting the fallback adapter returns SwiftShader.
-    TEST_F(AdapterCreationTest, FallbackAdapter) {
-        wgpu::RequestAdapterOptions options = {};
-        options.forceFallbackAdapter = true;
+    wgpu::Instance instance;
+    bool anyAdapterAvailable = false;
+    bool swiftShaderAvailable = false;
+    bool discreteGPUAvailable = false;
+    bool integratedGPUAvailable = false;
+};
 
-        MockCallback<WGPURequestAdapterCallback> cb;
+// Test that requesting the default adapter works
+TEST_F(AdapterCreationTest, DefaultAdapter) {
+    wgpu::RequestAdapterOptions options = {};
 
-        WGPUAdapter cAdapter = nullptr;
-        EXPECT_CALL(cb, Call(WGPURequestAdapterStatus_Success, _, nullptr, this))
-            .WillOnce(SaveArg<1>(&cAdapter));
-        instance.RequestAdapter(&options, cb.Callback(), cb.MakeUserdata(this));
+    MockCallback<WGPURequestAdapterCallback> cb;
 
-        wgpu::Adapter adapter = wgpu::Adapter::Acquire(cAdapter);
-        EXPECT_EQ(adapter != nullptr, swiftShaderAvailable);
-        if (adapter != nullptr) {
-            wgpu::AdapterProperties properties;
-            adapter.GetProperties(&properties);
+    WGPUAdapter cAdapter = nullptr;
+    EXPECT_CALL(cb, Call(WGPURequestAdapterStatus_Success, _, nullptr, this))
+        .WillOnce(SaveArg<1>(&cAdapter));
+    instance.RequestAdapter(&options, cb.Callback(), cb.MakeUserdata(this));
 
-            EXPECT_EQ(properties.adapterType, wgpu::AdapterType::CPU);
-            EXPECT_TRUE(gpu_info::IsSwiftshader(properties.vendorID, properties.deviceID));
-        }
+    wgpu::Adapter adapter = wgpu::Adapter::Acquire(cAdapter);
+    EXPECT_EQ(adapter != nullptr, anyAdapterAvailable);
+}
+
+// Test that passing nullptr for the options gets the default adapter
+TEST_F(AdapterCreationTest, NullGivesDefaultAdapter) {
+    wgpu::RequestAdapterOptions options = {};
+
+    MockCallback<WGPURequestAdapterCallback> cb;
+
+    WGPUAdapter cAdapter = nullptr;
+    EXPECT_CALL(cb, Call(WGPURequestAdapterStatus_Success, _, nullptr, this))
+        .WillOnce(SaveArg<1>(&cAdapter));
+    instance.RequestAdapter(&options, cb.Callback(), cb.MakeUserdata(this));
+
+    wgpu::Adapter adapter = wgpu::Adapter::Acquire(cAdapter);
+    EXPECT_EQ(adapter != nullptr, anyAdapterAvailable);
+
+    EXPECT_CALL(cb, Call(WGPURequestAdapterStatus_Success, _, nullptr, this + 1))
+        .WillOnce(SaveArg<1>(&cAdapter));
+    instance.RequestAdapter(nullptr, cb.Callback(), cb.MakeUserdata(this + 1));
+
+    wgpu::Adapter adapter2 = wgpu::Adapter::Acquire(cAdapter);
+    EXPECT_EQ(adapter.Get(), adapter2.Get());
+}
+
+// Test that requesting the fallback adapter returns SwiftShader.
+TEST_F(AdapterCreationTest, FallbackAdapter) {
+    wgpu::RequestAdapterOptions options = {};
+    options.forceFallbackAdapter = true;
+
+    MockCallback<WGPURequestAdapterCallback> cb;
+
+    WGPUAdapter cAdapter = nullptr;
+    EXPECT_CALL(cb, Call(WGPURequestAdapterStatus_Success, _, nullptr, this))
+        .WillOnce(SaveArg<1>(&cAdapter));
+    instance.RequestAdapter(&options, cb.Callback(), cb.MakeUserdata(this));
+
+    wgpu::Adapter adapter = wgpu::Adapter::Acquire(cAdapter);
+    EXPECT_EQ(adapter != nullptr, swiftShaderAvailable);
+    if (adapter != nullptr) {
+        wgpu::AdapterProperties properties;
+        adapter.GetProperties(&properties);
+
+        EXPECT_EQ(properties.adapterType, wgpu::AdapterType::CPU);
+        EXPECT_TRUE(gpu_info::IsSwiftshader(properties.vendorID, properties.deviceID));
     }
+}
 
-    // Test that requesting a high performance GPU works
-    TEST_F(AdapterCreationTest, PreferHighPerformance) {
-        wgpu::RequestAdapterOptions options = {};
-        options.powerPreference = wgpu::PowerPreference::HighPerformance;
+// Test that requesting a high performance GPU works
+TEST_F(AdapterCreationTest, PreferHighPerformance) {
+    wgpu::RequestAdapterOptions options = {};
+    options.powerPreference = wgpu::PowerPreference::HighPerformance;
 
-        MockCallback<WGPURequestAdapterCallback> cb;
+    MockCallback<WGPURequestAdapterCallback> cb;
 
-        WGPUAdapter cAdapter = nullptr;
-        EXPECT_CALL(cb, Call(WGPURequestAdapterStatus_Success, _, nullptr, this))
-            .WillOnce(SaveArg<1>(&cAdapter));
-        instance.RequestAdapter(&options, cb.Callback(), cb.MakeUserdata(this));
+    WGPUAdapter cAdapter = nullptr;
+    EXPECT_CALL(cb, Call(WGPURequestAdapterStatus_Success, _, nullptr, this))
+        .WillOnce(SaveArg<1>(&cAdapter));
+    instance.RequestAdapter(&options, cb.Callback(), cb.MakeUserdata(this));
 
-        wgpu::Adapter adapter = wgpu::Adapter::Acquire(cAdapter);
-        EXPECT_EQ(adapter != nullptr, anyAdapterAvailable);
-        if (discreteGPUAvailable) {
-            wgpu::AdapterProperties properties;
-            adapter.GetProperties(&properties);
-            EXPECT_EQ(properties.adapterType, wgpu::AdapterType::DiscreteGPU);
-        }
+    wgpu::Adapter adapter = wgpu::Adapter::Acquire(cAdapter);
+    EXPECT_EQ(adapter != nullptr, anyAdapterAvailable);
+    if (discreteGPUAvailable) {
+        wgpu::AdapterProperties properties;
+        adapter.GetProperties(&properties);
+        EXPECT_EQ(properties.adapterType, wgpu::AdapterType::DiscreteGPU);
     }
+}
 
-    // Test that requesting a low power GPU works
-    TEST_F(AdapterCreationTest, PreferLowPower) {
-        wgpu::RequestAdapterOptions options = {};
-        options.powerPreference = wgpu::PowerPreference::LowPower;
+// Test that requesting a low power GPU works
+TEST_F(AdapterCreationTest, PreferLowPower) {
+    wgpu::RequestAdapterOptions options = {};
+    options.powerPreference = wgpu::PowerPreference::LowPower;
 
-        MockCallback<WGPURequestAdapterCallback> cb;
+    MockCallback<WGPURequestAdapterCallback> cb;
 
-        WGPUAdapter cAdapter = nullptr;
-        EXPECT_CALL(cb, Call(WGPURequestAdapterStatus_Success, _, nullptr, this))
-            .WillOnce(SaveArg<1>(&cAdapter));
-        instance.RequestAdapter(&options, cb.Callback(), cb.MakeUserdata(this));
+    WGPUAdapter cAdapter = nullptr;
+    EXPECT_CALL(cb, Call(WGPURequestAdapterStatus_Success, _, nullptr, this))
+        .WillOnce(SaveArg<1>(&cAdapter));
+    instance.RequestAdapter(&options, cb.Callback(), cb.MakeUserdata(this));
 
-        wgpu::Adapter adapter = wgpu::Adapter::Acquire(cAdapter);
-        EXPECT_EQ(adapter != nullptr, anyAdapterAvailable);
-        if (integratedGPUAvailable) {
-            wgpu::AdapterProperties properties;
-            adapter.GetProperties(&properties);
-            EXPECT_EQ(properties.adapterType, wgpu::AdapterType::IntegratedGPU);
-        }
+    wgpu::Adapter adapter = wgpu::Adapter::Acquire(cAdapter);
+    EXPECT_EQ(adapter != nullptr, anyAdapterAvailable);
+    if (integratedGPUAvailable) {
+        wgpu::AdapterProperties properties;
+        adapter.GetProperties(&properties);
+        EXPECT_EQ(properties.adapterType, wgpu::AdapterType::IntegratedGPU);
     }
+}
 
 }  // anonymous namespace
diff --git a/src/dawn/tests/end2end/BufferTests.cpp b/src/dawn/tests/end2end/BufferTests.cpp
index 9853495..b8d7a26 100644
--- a/src/dawn/tests/end2end/BufferTests.cpp
+++ b/src/dawn/tests/end2end/BufferTests.cpp
@@ -556,9 +556,7 @@
         return buffer.GetConstMappedRange(0, size);
     }
 
-    void UnmapBuffer(const wgpu::Buffer& buffer) {
-        buffer.Unmap();
-    }
+    void UnmapBuffer(const wgpu::Buffer& buffer) { buffer.Unmap(); }
 
     wgpu::Buffer BufferMappedAtCreation(wgpu::BufferUsage usage, uint64_t size) {
         wgpu::BufferDescriptor descriptor;
diff --git a/src/dawn/tests/end2end/BufferZeroInitTests.cpp b/src/dawn/tests/end2end/BufferZeroInitTests.cpp
index 09dcc4a..71100b7 100644
--- a/src/dawn/tests/end2end/BufferZeroInitTests.cpp
+++ b/src/dawn/tests/end2end/BufferZeroInitTests.cpp
@@ -34,14 +34,14 @@
 
 namespace {
 
-    struct BufferZeroInitInCopyT2BSpec {
-        wgpu::Extent3D textureSize;
-        uint64_t bufferOffset;
-        uint64_t extraBytes;
-        uint32_t bytesPerRow;
-        uint32_t rowsPerImage;
-        uint32_t lazyClearCount;
-    };
+struct BufferZeroInitInCopyT2BSpec {
+    wgpu::Extent3D textureSize;
+    uint64_t bufferOffset;
+    uint64_t extraBytes;
+    uint32_t bytesPerRow;
+    uint32_t rowsPerImage;
+    uint32_t lazyClearCount;
+};
 
 }  // anonymous namespace
 
diff --git a/src/dawn/tests/end2end/ColorStateTests.cpp b/src/dawn/tests/end2end/ColorStateTests.cpp
index 142ff47..1753c9a 100644
--- a/src/dawn/tests/end2end/ColorStateTests.cpp
+++ b/src/dawn/tests/end2end/ColorStateTests.cpp
@@ -18,9 +18,9 @@
 #include <utility>
 #include <vector>
 
-#include "dawn/tests/DawnTest.h"
 #include "dawn/common/Assert.h"
 #include "dawn/common/Constants.h"
+#include "dawn/tests/DawnTest.h"
 #include "dawn/utils/ComboRenderPipelineDescriptor.h"
 #include "dawn/utils/WGPUHelpers.h"
 
@@ -220,83 +220,83 @@
 };
 
 namespace {
-    // Add two colors and clamp
-    constexpr RGBA8 operator+(const RGBA8& col1, const RGBA8& col2) {
-        int r = static_cast<int>(col1.r) + static_cast<int>(col2.r);
-        int g = static_cast<int>(col1.g) + static_cast<int>(col2.g);
-        int b = static_cast<int>(col1.b) + static_cast<int>(col2.b);
-        int a = static_cast<int>(col1.a) + static_cast<int>(col2.a);
-        r = (r > 255 ? 255 : (r < 0 ? 0 : r));
-        g = (g > 255 ? 255 : (g < 0 ? 0 : g));
-        b = (b > 255 ? 255 : (b < 0 ? 0 : b));
-        a = (a > 255 ? 255 : (a < 0 ? 0 : a));
+// Add two colors and clamp
+constexpr RGBA8 operator+(const RGBA8& col1, const RGBA8& col2) {
+    int r = static_cast<int>(col1.r) + static_cast<int>(col2.r);
+    int g = static_cast<int>(col1.g) + static_cast<int>(col2.g);
+    int b = static_cast<int>(col1.b) + static_cast<int>(col2.b);
+    int a = static_cast<int>(col1.a) + static_cast<int>(col2.a);
+    r = (r > 255 ? 255 : (r < 0 ? 0 : r));
+    g = (g > 255 ? 255 : (g < 0 ? 0 : g));
+    b = (b > 255 ? 255 : (b < 0 ? 0 : b));
+    a = (a > 255 ? 255 : (a < 0 ? 0 : a));
 
-        return RGBA8(static_cast<uint8_t>(r), static_cast<uint8_t>(g), static_cast<uint8_t>(b),
-                     static_cast<uint8_t>(a));
-    }
+    return RGBA8(static_cast<uint8_t>(r), static_cast<uint8_t>(g), static_cast<uint8_t>(b),
+                 static_cast<uint8_t>(a));
+}
 
-    // Subtract two colors and clamp
-    constexpr RGBA8 operator-(const RGBA8& col1, const RGBA8& col2) {
-        int r = static_cast<int>(col1.r) - static_cast<int>(col2.r);
-        int g = static_cast<int>(col1.g) - static_cast<int>(col2.g);
-        int b = static_cast<int>(col1.b) - static_cast<int>(col2.b);
-        int a = static_cast<int>(col1.a) - static_cast<int>(col2.a);
-        r = (r > 255 ? 255 : (r < 0 ? 0 : r));
-        g = (g > 255 ? 255 : (g < 0 ? 0 : g));
-        b = (b > 255 ? 255 : (b < 0 ? 0 : b));
-        a = (a > 255 ? 255 : (a < 0 ? 0 : a));
+// Subtract two colors and clamp
+constexpr RGBA8 operator-(const RGBA8& col1, const RGBA8& col2) {
+    int r = static_cast<int>(col1.r) - static_cast<int>(col2.r);
+    int g = static_cast<int>(col1.g) - static_cast<int>(col2.g);
+    int b = static_cast<int>(col1.b) - static_cast<int>(col2.b);
+    int a = static_cast<int>(col1.a) - static_cast<int>(col2.a);
+    r = (r > 255 ? 255 : (r < 0 ? 0 : r));
+    g = (g > 255 ? 255 : (g < 0 ? 0 : g));
+    b = (b > 255 ? 255 : (b < 0 ? 0 : b));
+    a = (a > 255 ? 255 : (a < 0 ? 0 : a));
 
-        return RGBA8(static_cast<uint8_t>(r), static_cast<uint8_t>(g), static_cast<uint8_t>(b),
-                     static_cast<uint8_t>(a));
-    }
+    return RGBA8(static_cast<uint8_t>(r), static_cast<uint8_t>(g), static_cast<uint8_t>(b),
+                 static_cast<uint8_t>(a));
+}
 
-    // Get the component-wise minimum of two colors
-    RGBA8 min(const RGBA8& col1, const RGBA8& col2) {
-        return RGBA8(std::min(col1.r, col2.r), std::min(col1.g, col2.g), std::min(col1.b, col2.b),
-                     std::min(col1.a, col2.a));
-    }
+// Get the component-wise minimum of two colors
+RGBA8 min(const RGBA8& col1, const RGBA8& col2) {
+    return RGBA8(std::min(col1.r, col2.r), std::min(col1.g, col2.g), std::min(col1.b, col2.b),
+                 std::min(col1.a, col2.a));
+}
 
-    // Get the component-wise maximum of two colors
-    RGBA8 max(const RGBA8& col1, const RGBA8& col2) {
-        return RGBA8(std::max(col1.r, col2.r), std::max(col1.g, col2.g), std::max(col1.b, col2.b),
-                     std::max(col1.a, col2.a));
-    }
+// Get the component-wise maximum of two colors
+RGBA8 max(const RGBA8& col1, const RGBA8& col2) {
+    return RGBA8(std::max(col1.r, col2.r), std::max(col1.g, col2.g), std::max(col1.b, col2.b),
+                 std::max(col1.a, col2.a));
+}
 
-    // Blend two RGBA8 color values parameterized by the provided factors in the range [0.f, 1.f]
-    RGBA8 mix(const RGBA8& col1, const RGBA8& col2, std::array<float, 4> fac) {
-        float r = static_cast<float>(col1.r) * (1.f - fac[0]) + static_cast<float>(col2.r) * fac[0];
-        float g = static_cast<float>(col1.g) * (1.f - fac[1]) + static_cast<float>(col2.g) * fac[1];
-        float b = static_cast<float>(col1.b) * (1.f - fac[2]) + static_cast<float>(col2.b) * fac[2];
-        float a = static_cast<float>(col1.a) * (1.f - fac[3]) + static_cast<float>(col2.a) * fac[3];
+// Blend two RGBA8 color values parameterized by the provided factors in the range [0.f, 1.f]
+RGBA8 mix(const RGBA8& col1, const RGBA8& col2, std::array<float, 4> fac) {
+    float r = static_cast<float>(col1.r) * (1.f - fac[0]) + static_cast<float>(col2.r) * fac[0];
+    float g = static_cast<float>(col1.g) * (1.f - fac[1]) + static_cast<float>(col2.g) * fac[1];
+    float b = static_cast<float>(col1.b) * (1.f - fac[2]) + static_cast<float>(col2.b) * fac[2];
+    float a = static_cast<float>(col1.a) * (1.f - fac[3]) + static_cast<float>(col2.a) * fac[3];
 
-        return RGBA8({static_cast<uint8_t>(std::round(r)), static_cast<uint8_t>(std::round(g)),
-                      static_cast<uint8_t>(std::round(b)), static_cast<uint8_t>(std::round(a))});
-    }
+    return RGBA8({static_cast<uint8_t>(std::round(r)), static_cast<uint8_t>(std::round(g)),
+                  static_cast<uint8_t>(std::round(b)), static_cast<uint8_t>(std::round(a))});
+}
 
-    // Blend two RGBA8 color values parameterized by the provided RGBA8 factor
-    RGBA8 mix(const RGBA8& col1, const RGBA8& col2, const RGBA8& fac) {
-        std::array<float, 4> f = {{
-            static_cast<float>(fac.r) / 255.f,
-            static_cast<float>(fac.g) / 255.f,
-            static_cast<float>(fac.b) / 255.f,
-            static_cast<float>(fac.a) / 255.f,
-        }};
-        return mix(col1, col2, f);
-    }
-
-    constexpr std::array<RGBA8, 8> kColors = {{
-        // check operations over multiple channels
-        RGBA8(64, 0, 0, 0),
-        RGBA8(0, 64, 0, 0),
-        RGBA8(64, 0, 32, 0),
-        RGBA8(0, 64, 32, 0),
-        RGBA8(128, 0, 128, 128),
-        RGBA8(0, 128, 128, 128),
-
-        // check cases that may cause overflow
-        RGBA8(0, 0, 0, 0),
-        RGBA8(255, 255, 255, 255),
+// Blend two RGBA8 color values parameterized by the provided RGBA8 factor
+RGBA8 mix(const RGBA8& col1, const RGBA8& col2, const RGBA8& fac) {
+    std::array<float, 4> f = {{
+        static_cast<float>(fac.r) / 255.f,
+        static_cast<float>(fac.g) / 255.f,
+        static_cast<float>(fac.b) / 255.f,
+        static_cast<float>(fac.a) / 255.f,
     }};
+    return mix(col1, col2, f);
+}
+
+constexpr std::array<RGBA8, 8> kColors = {{
+    // check operations over multiple channels
+    RGBA8(64, 0, 0, 0),
+    RGBA8(0, 64, 0, 0),
+    RGBA8(64, 0, 32, 0),
+    RGBA8(0, 64, 32, 0),
+    RGBA8(128, 0, 128, 128),
+    RGBA8(0, 128, 128, 128),
+
+    // check cases that may cause overflow
+    RGBA8(0, 0, 0, 0),
+    RGBA8(255, 255, 255, 255),
+}};
 }  // namespace
 
 // Test compilation and usage of the fixture
diff --git a/src/dawn/tests/end2end/CompressedTextureFormatTests.cpp b/src/dawn/tests/end2end/CompressedTextureFormatTests.cpp
index 348ca98..310a534 100644
--- a/src/dawn/tests/end2end/CompressedTextureFormatTests.cpp
+++ b/src/dawn/tests/end2end/CompressedTextureFormatTests.cpp
@@ -35,8 +35,8 @@
 };
 
 namespace {
-    using TextureFormat = wgpu::TextureFormat;
-    DAWN_TEST_PARAM_STRUCT(CompressedTextureFormatTestParams, TextureFormat);
+using TextureFormat = wgpu::TextureFormat;
+DAWN_TEST_PARAM_STRUCT(CompressedTextureFormatTestParams, TextureFormat);
 }  // namespace
 
 class CompressedTextureFormatTest : public DawnTestWithParams<CompressedTextureFormatTestParams> {
@@ -61,9 +61,7 @@
         return {};
     }
 
-    bool IsFormatSupported() const {
-        return mIsFormatSupported;
-    }
+    bool IsFormatSupported() const { return mIsFormatSupported; }
 
     uint32_t BlockWidthInTexels() const {
         ASSERT(IsFormatSupported());
@@ -1163,9 +1161,7 @@
         return features;
     }
 
-    bool IsBCFormatSupported() const {
-        return mIsBCFormatSupported;
-    }
+    bool IsBCFormatSupported() const { return mIsBCFormatSupported; }
 
     bool mIsBCFormatSupported = false;
 };
diff --git a/src/dawn/tests/end2end/ComputeLayoutMemoryBufferTests.cpp b/src/dawn/tests/end2end/ComputeLayoutMemoryBufferTests.cpp
index d9cde6c..41a88d0 100644
--- a/src/dawn/tests/end2end/ComputeLayoutMemoryBufferTests.cpp
+++ b/src/dawn/tests/end2end/ComputeLayoutMemoryBufferTests.cpp
@@ -18,59 +18,57 @@
 #include <string>
 #include <vector>
 
-#include "dawn/tests/DawnTest.h"
 #include "dawn/common/Math.h"
+#include "dawn/tests/DawnTest.h"
 #include "dawn/utils/WGPUHelpers.h"
 
 namespace {
 
-    // Helper for replacing all occurrences of substr in str with replacement
-    std::string ReplaceAll(std::string str,
-                           const std::string& substr,
-                           const std::string& replacement) {
-        size_t pos = 0;
-        while ((pos = str.find(substr, pos)) != std::string::npos) {
-            str.replace(pos, substr.length(), replacement);
-            pos += replacement.length();
-        }
-        return str;
+// Helper for replacing all occurrences of substr in str with replacement
+std::string ReplaceAll(std::string str, const std::string& substr, const std::string& replacement) {
+    size_t pos = 0;
+    while ((pos = str.find(substr, pos)) != std::string::npos) {
+        str.replace(pos, substr.length(), replacement);
+        pos += replacement.length();
     }
+    return str;
+}
 
-    // DataMatcherCallback is the callback function by DataMatcher.
-    // It is called for each contiguous sequence of bytes that should be checked
-    // for equality.
-    // offset and size are in units of bytes.
-    using DataMatcherCallback = std::function<void(uint32_t offset, uint32_t size)>;
+// DataMatcherCallback is the callback function by DataMatcher.
+// It is called for each contiguous sequence of bytes that should be checked
+// for equality.
+// offset and size are in units of bytes.
+using DataMatcherCallback = std::function<void(uint32_t offset, uint32_t size)>;
 
-    // DataMatcher is a function pointer to a data matching function.
-    // size is the total number of bytes being considered for matching.
-    // The callback may be called once or multiple times, and may only consider
-    // part of the interval [0, size)
-    using DataMatcher = void (*)(uint32_t size, DataMatcherCallback);
+// DataMatcher is a function pointer to a data matching function.
+// size is the total number of bytes being considered for matching.
+// The callback may be called once or multiple times, and may only consider
+// part of the interval [0, size)
+using DataMatcher = void (*)(uint32_t size, DataMatcherCallback);
 
-    // FullDataMatcher is a DataMatcher that calls callback with the interval
-    // [0, size)
-    void FullDataMatcher(uint32_t size, DataMatcherCallback callback) {
-        callback(0, size);
+// FullDataMatcher is a DataMatcher that calls callback with the interval
+// [0, size)
+void FullDataMatcher(uint32_t size, DataMatcherCallback callback) {
+    callback(0, size);
+}
+
+// StridedDataMatcher is a DataMatcher that calls callback with the strided
+// intervals of length BYTES_TO_MATCH, skipping BYTES_TO_SKIP.
+// For example: StridedDataMatcher<2, 4>(18, callback) will call callback
+// with the intervals: [0, 2), [6, 8), [12, 14)
+template <int BYTES_TO_MATCH, int BYTES_TO_SKIP>
+void StridedDataMatcher(uint32_t size, DataMatcherCallback callback) {
+    uint32_t offset = 0;
+    while (offset < size) {
+        callback(offset, BYTES_TO_MATCH);
+        offset += BYTES_TO_MATCH + BYTES_TO_SKIP;
     }
+}
 
-    // StridedDataMatcher is a DataMatcher that calls callback with the strided
-    // intervals of length BYTES_TO_MATCH, skipping BYTES_TO_SKIP.
-    // For example: StridedDataMatcher<2, 4>(18, callback) will call callback
-    // with the intervals: [0, 2), [6, 8), [12, 14)
-    template <int BYTES_TO_MATCH, int BYTES_TO_SKIP>
-    void StridedDataMatcher(uint32_t size, DataMatcherCallback callback) {
-        uint32_t offset = 0;
-        while (offset < size) {
-            callback(offset, BYTES_TO_MATCH);
-            offset += BYTES_TO_MATCH + BYTES_TO_SKIP;
-        }
-    }
-
-    // Align returns the WGSL decoration for an explicit structure field alignment
-    std::string AlignDeco(uint32_t value) {
-        return "@align(" + std::to_string(value) + ") ";
-    }
+// Align returns the WGSL decoration for an explicit structure field alignment
+std::string AlignDeco(uint32_t value) {
+    return "@align(" + std::to_string(value) + ") ";
+}
 
 }  // namespace
 
@@ -135,9 +133,7 @@
 
 class ComputeLayoutMemoryBufferTests
     : public DawnTestWithParams<ComputeLayoutMemoryBufferTestParams> {
-    void SetUp() override {
-        DawnTestBase::SetUp();
-    }
+    void SetUp() override { DawnTestBase::SetUp(); }
 };
 
 TEST_P(ComputeLayoutMemoryBufferTests, Fields) {
@@ -345,164 +341,153 @@
 
 namespace {
 
-    auto GenerateParams() {
-        auto params = MakeParamGenerator<ComputeLayoutMemoryBufferTestParams>(
-            {
-                D3D12Backend(), MetalBackend(), VulkanBackend(),
-                // TODO(crbug.com/dawn/942)
-                // There was a compiler error: Buffer block cannot be expressed as any of std430,
-                // std140, scalar, even with enhanced layouts. You can try flattening this block to
-                // support a more flexible layout.
-                // OpenGLBackend(),
-                // OpenGLESBackend(),
-            },
-            {StorageClass::Storage, StorageClass::Uniform},
-            {
-                // See https://www.w3.org/TR/WGSL/#alignment-and-size
-                // Scalar types with no custom alignment or size
-                Field{"i32", /* align */ 4, /* size */ 4},
-                Field{"u32", /* align */ 4, /* size */ 4},
-                Field{"f32", /* align */ 4, /* size */ 4},
+auto GenerateParams() {
+    auto params = MakeParamGenerator<ComputeLayoutMemoryBufferTestParams>(
+        {
+            D3D12Backend(), MetalBackend(), VulkanBackend(),
+            // TODO(crbug.com/dawn/942)
+            // There was a compiler error: Buffer block cannot be expressed as any of std430,
+            // std140, scalar, even with enhanced layouts. You can try flattening this block to
+            // support a more flexible layout.
+            // OpenGLBackend(),
+            // OpenGLESBackend(),
+        },
+        {StorageClass::Storage, StorageClass::Uniform},
+        {
+            // See https://www.w3.org/TR/WGSL/#alignment-and-size
+            // Scalar types with no custom alignment or size
+            Field{"i32", /* align */ 4, /* size */ 4},
+            Field{"u32", /* align */ 4, /* size */ 4},
+            Field{"f32", /* align */ 4, /* size */ 4},
 
-                // Scalar types with custom alignment
-                Field{"i32", /* align */ 16, /* size */ 4},
-                Field{"u32", /* align */ 16, /* size */ 4},
-                Field{"f32", /* align */ 16, /* size */ 4},
+            // Scalar types with custom alignment
+            Field{"i32", /* align */ 16, /* size */ 4},
+            Field{"u32", /* align */ 16, /* size */ 4},
+            Field{"f32", /* align */ 16, /* size */ 4},
 
-                // Scalar types with custom size
-                Field{"i32", /* align */ 4, /* size */ 4}.PaddedSize(24),
-                Field{"u32", /* align */ 4, /* size */ 4}.PaddedSize(24),
-                Field{"f32", /* align */ 4, /* size */ 4}.PaddedSize(24),
+            // Scalar types with custom size
+            Field{"i32", /* align */ 4, /* size */ 4}.PaddedSize(24),
+            Field{"u32", /* align */ 4, /* size */ 4}.PaddedSize(24),
+            Field{"f32", /* align */ 4, /* size */ 4}.PaddedSize(24),
 
-                // Vector types with no custom alignment or size
-                Field{"vec2<i32>", /* align */ 8, /* size */ 8},
-                Field{"vec2<u32>", /* align */ 8, /* size */ 8},
-                Field{"vec2<f32>", /* align */ 8, /* size */ 8},
-                Field{"vec3<i32>", /* align */ 16, /* size */ 12},
-                Field{"vec3<u32>", /* align */ 16, /* size */ 12},
-                Field{"vec3<f32>", /* align */ 16, /* size */ 12},
-                Field{"vec4<i32>", /* align */ 16, /* size */ 16},
-                Field{"vec4<u32>", /* align */ 16, /* size */ 16},
-                Field{"vec4<f32>", /* align */ 16, /* size */ 16},
+            // Vector types with no custom alignment or size
+            Field{"vec2<i32>", /* align */ 8, /* size */ 8},
+            Field{"vec2<u32>", /* align */ 8, /* size */ 8},
+            Field{"vec2<f32>", /* align */ 8, /* size */ 8},
+            Field{"vec3<i32>", /* align */ 16, /* size */ 12},
+            Field{"vec3<u32>", /* align */ 16, /* size */ 12},
+            Field{"vec3<f32>", /* align */ 16, /* size */ 12},
+            Field{"vec4<i32>", /* align */ 16, /* size */ 16},
+            Field{"vec4<u32>", /* align */ 16, /* size */ 16},
+            Field{"vec4<f32>", /* align */ 16, /* size */ 16},
 
-                // Vector types with custom alignment
-                Field{"vec2<i32>", /* align */ 32, /* size */ 8},
-                Field{"vec2<u32>", /* align */ 32, /* size */ 8},
-                Field{"vec2<f32>", /* align */ 32, /* size */ 8},
-                Field{"vec3<i32>", /* align */ 32, /* size */ 12},
-                Field{"vec3<u32>", /* align */ 32, /* size */ 12},
-                Field{"vec3<f32>", /* align */ 32, /* size */ 12},
-                Field{"vec4<i32>", /* align */ 32, /* size */ 16},
-                Field{"vec4<u32>", /* align */ 32, /* size */ 16},
-                Field{"vec4<f32>", /* align */ 32, /* size */ 16},
+            // Vector types with custom alignment
+            Field{"vec2<i32>", /* align */ 32, /* size */ 8},
+            Field{"vec2<u32>", /* align */ 32, /* size */ 8},
+            Field{"vec2<f32>", /* align */ 32, /* size */ 8},
+            Field{"vec3<i32>", /* align */ 32, /* size */ 12},
+            Field{"vec3<u32>", /* align */ 32, /* size */ 12},
+            Field{"vec3<f32>", /* align */ 32, /* size */ 12},
+            Field{"vec4<i32>", /* align */ 32, /* size */ 16},
+            Field{"vec4<u32>", /* align */ 32, /* size */ 16},
+            Field{"vec4<f32>", /* align */ 32, /* size */ 16},
 
-                // Vector types with custom size
-                Field{"vec2<i32>", /* align */ 8, /* size */ 8}.PaddedSize(24),
-                Field{"vec2<u32>", /* align */ 8, /* size */ 8}.PaddedSize(24),
-                Field{"vec2<f32>", /* align */ 8, /* size */ 8}.PaddedSize(24),
-                Field{"vec3<i32>", /* align */ 16, /* size */ 12}.PaddedSize(24),
-                Field{"vec3<u32>", /* align */ 16, /* size */ 12}.PaddedSize(24),
-                Field{"vec3<f32>", /* align */ 16, /* size */ 12}.PaddedSize(24),
-                Field{"vec4<i32>", /* align */ 16, /* size */ 16}.PaddedSize(24),
-                Field{"vec4<u32>", /* align */ 16, /* size */ 16}.PaddedSize(24),
-                Field{"vec4<f32>", /* align */ 16, /* size */ 16}.PaddedSize(24),
+            // Vector types with custom size
+            Field{"vec2<i32>", /* align */ 8, /* size */ 8}.PaddedSize(24),
+            Field{"vec2<u32>", /* align */ 8, /* size */ 8}.PaddedSize(24),
+            Field{"vec2<f32>", /* align */ 8, /* size */ 8}.PaddedSize(24),
+            Field{"vec3<i32>", /* align */ 16, /* size */ 12}.PaddedSize(24),
+            Field{"vec3<u32>", /* align */ 16, /* size */ 12}.PaddedSize(24),
+            Field{"vec3<f32>", /* align */ 16, /* size */ 12}.PaddedSize(24),
+            Field{"vec4<i32>", /* align */ 16, /* size */ 16}.PaddedSize(24),
+            Field{"vec4<u32>", /* align */ 16, /* size */ 16}.PaddedSize(24),
+            Field{"vec4<f32>", /* align */ 16, /* size */ 16}.PaddedSize(24),
 
-                // Matrix types with no custom alignment or size
-                Field{"mat2x2<f32>", /* align */ 8, /* size */ 16},
-                Field{"mat3x2<f32>", /* align */ 8, /* size */ 24},
-                Field{"mat4x2<f32>", /* align */ 8, /* size */ 32},
-                Field{"mat2x3<f32>", /* align */ 16, /* size */ 32}.Strided<12, 4>(),
-                Field{"mat3x3<f32>", /* align */ 16, /* size */ 48}.Strided<12, 4>(),
-                Field{"mat4x3<f32>", /* align */ 16, /* size */ 64}.Strided<12, 4>(),
-                Field{"mat2x4<f32>", /* align */ 16, /* size */ 32},
-                Field{"mat3x4<f32>", /* align */ 16, /* size */ 48},
-                Field{"mat4x4<f32>", /* align */ 16, /* size */ 64},
+            // Matrix types with no custom alignment or size
+            Field{"mat2x2<f32>", /* align */ 8, /* size */ 16},
+            Field{"mat3x2<f32>", /* align */ 8, /* size */ 24},
+            Field{"mat4x2<f32>", /* align */ 8, /* size */ 32},
+            Field{"mat2x3<f32>", /* align */ 16, /* size */ 32}.Strided<12, 4>(),
+            Field{"mat3x3<f32>", /* align */ 16, /* size */ 48}.Strided<12, 4>(),
+            Field{"mat4x3<f32>", /* align */ 16, /* size */ 64}.Strided<12, 4>(),
+            Field{"mat2x4<f32>", /* align */ 16, /* size */ 32},
+            Field{"mat3x4<f32>", /* align */ 16, /* size */ 48},
+            Field{"mat4x4<f32>", /* align */ 16, /* size */ 64},
 
-                // Matrix types with custom alignment
-                Field{"mat2x2<f32>", /* align */ 32, /* size */ 16},
-                Field{"mat3x2<f32>", /* align */ 32, /* size */ 24},
-                Field{"mat4x2<f32>", /* align */ 32, /* size */ 32},
-                Field{"mat2x3<f32>", /* align */ 32, /* size */ 32}.Strided<12, 4>(),
-                Field{"mat3x3<f32>", /* align */ 32, /* size */ 48}.Strided<12, 4>(),
-                Field{"mat4x3<f32>", /* align */ 32, /* size */ 64}.Strided<12, 4>(),
-                Field{"mat2x4<f32>", /* align */ 32, /* size */ 32},
-                Field{"mat3x4<f32>", /* align */ 32, /* size */ 48},
-                Field{"mat4x4<f32>", /* align */ 32, /* size */ 64},
+            // Matrix types with custom alignment
+            Field{"mat2x2<f32>", /* align */ 32, /* size */ 16},
+            Field{"mat3x2<f32>", /* align */ 32, /* size */ 24},
+            Field{"mat4x2<f32>", /* align */ 32, /* size */ 32},
+            Field{"mat2x3<f32>", /* align */ 32, /* size */ 32}.Strided<12, 4>(),
+            Field{"mat3x3<f32>", /* align */ 32, /* size */ 48}.Strided<12, 4>(),
+            Field{"mat4x3<f32>", /* align */ 32, /* size */ 64}.Strided<12, 4>(),
+            Field{"mat2x4<f32>", /* align */ 32, /* size */ 32},
+            Field{"mat3x4<f32>", /* align */ 32, /* size */ 48},
+            Field{"mat4x4<f32>", /* align */ 32, /* size */ 64},
 
-                // Matrix types with custom size
-                Field{"mat2x2<f32>", /* align */ 8, /* size */ 16}.PaddedSize(128),
-                Field{"mat3x2<f32>", /* align */ 8, /* size */ 24}.PaddedSize(128),
-                Field{"mat4x2<f32>", /* align */ 8, /* size */ 32}.PaddedSize(128),
-                Field{"mat2x3<f32>", /* align */ 16, /* size */ 32}
-                    .PaddedSize(128)
-                    .Strided<12, 4>(),
-                Field{"mat3x3<f32>", /* align */ 16, /* size */ 48}
-                    .PaddedSize(128)
-                    .Strided<12, 4>(),
-                Field{"mat4x3<f32>", /* align */ 16, /* size */ 64}
-                    .PaddedSize(128)
-                    .Strided<12, 4>(),
-                Field{"mat2x4<f32>", /* align */ 16, /* size */ 32}.PaddedSize(128),
-                Field{"mat3x4<f32>", /* align */ 16, /* size */ 48}.PaddedSize(128),
-                Field{"mat4x4<f32>", /* align */ 16, /* size */ 64}.PaddedSize(128),
+            // Matrix types with custom size
+            Field{"mat2x2<f32>", /* align */ 8, /* size */ 16}.PaddedSize(128),
+            Field{"mat3x2<f32>", /* align */ 8, /* size */ 24}.PaddedSize(128),
+            Field{"mat4x2<f32>", /* align */ 8, /* size */ 32}.PaddedSize(128),
+            Field{"mat2x3<f32>", /* align */ 16, /* size */ 32}.PaddedSize(128).Strided<12, 4>(),
+            Field{"mat3x3<f32>", /* align */ 16, /* size */ 48}.PaddedSize(128).Strided<12, 4>(),
+            Field{"mat4x3<f32>", /* align */ 16, /* size */ 64}.PaddedSize(128).Strided<12, 4>(),
+            Field{"mat2x4<f32>", /* align */ 16, /* size */ 32}.PaddedSize(128),
+            Field{"mat3x4<f32>", /* align */ 16, /* size */ 48}.PaddedSize(128),
+            Field{"mat4x4<f32>", /* align */ 16, /* size */ 64}.PaddedSize(128),
 
-                // Array types with no custom alignment or size.
-                // Note: The use of StorageBufferOnly() is due to UBOs requiring 16 byte alignment
-                // of array elements. See https://www.w3.org/TR/WGSL/#storage-class-constraints
-                Field{"array<u32, 1>", /* align */ 4, /* size */ 4}.StorageBufferOnly(),
-                Field{"array<u32, 2>", /* align */ 4, /* size */ 8}.StorageBufferOnly(),
-                Field{"array<u32, 3>", /* align */ 4, /* size */ 12}.StorageBufferOnly(),
-                Field{"array<u32, 4>", /* align */ 4, /* size */ 16}.StorageBufferOnly(),
-                Field{"array<vec4<u32>, 1>", /* align */ 16, /* size */ 16},
-                Field{"array<vec4<u32>, 2>", /* align */ 16, /* size */ 32},
-                Field{"array<vec4<u32>, 3>", /* align */ 16, /* size */ 48},
-                Field{"array<vec4<u32>, 4>", /* align */ 16, /* size */ 64},
-                Field{"array<vec3<u32>, 4>", /* align */ 16, /* size */ 64}.Strided<12, 4>(),
+            // Array types with no custom alignment or size.
+            // Note: The use of StorageBufferOnly() is due to UBOs requiring 16 byte alignment
+            // of array elements. See https://www.w3.org/TR/WGSL/#storage-class-constraints
+            Field{"array<u32, 1>", /* align */ 4, /* size */ 4}.StorageBufferOnly(),
+            Field{"array<u32, 2>", /* align */ 4, /* size */ 8}.StorageBufferOnly(),
+            Field{"array<u32, 3>", /* align */ 4, /* size */ 12}.StorageBufferOnly(),
+            Field{"array<u32, 4>", /* align */ 4, /* size */ 16}.StorageBufferOnly(),
+            Field{"array<vec4<u32>, 1>", /* align */ 16, /* size */ 16},
+            Field{"array<vec4<u32>, 2>", /* align */ 16, /* size */ 32},
+            Field{"array<vec4<u32>, 3>", /* align */ 16, /* size */ 48},
+            Field{"array<vec4<u32>, 4>", /* align */ 16, /* size */ 64},
+            Field{"array<vec3<u32>, 4>", /* align */ 16, /* size */ 64}.Strided<12, 4>(),
 
-                // Array types with custom alignment
-                Field{"array<u32, 1>", /* align */ 32, /* size */ 4}.StorageBufferOnly(),
-                Field{"array<u32, 2>", /* align */ 32, /* size */ 8}.StorageBufferOnly(),
-                Field{"array<u32, 3>", /* align */ 32, /* size */ 12}.StorageBufferOnly(),
-                Field{"array<u32, 4>", /* align */ 32, /* size */ 16}.StorageBufferOnly(),
-                Field{"array<vec4<u32>, 1>", /* align */ 32, /* size */ 16},
-                Field{"array<vec4<u32>, 2>", /* align */ 32, /* size */ 32},
-                Field{"array<vec4<u32>, 3>", /* align */ 32, /* size */ 48},
-                Field{"array<vec4<u32>, 4>", /* align */ 32, /* size */ 64},
-                Field{"array<vec3<u32>, 4>", /* align */ 32, /* size */ 64}.Strided<12, 4>(),
+            // Array types with custom alignment
+            Field{"array<u32, 1>", /* align */ 32, /* size */ 4}.StorageBufferOnly(),
+            Field{"array<u32, 2>", /* align */ 32, /* size */ 8}.StorageBufferOnly(),
+            Field{"array<u32, 3>", /* align */ 32, /* size */ 12}.StorageBufferOnly(),
+            Field{"array<u32, 4>", /* align */ 32, /* size */ 16}.StorageBufferOnly(),
+            Field{"array<vec4<u32>, 1>", /* align */ 32, /* size */ 16},
+            Field{"array<vec4<u32>, 2>", /* align */ 32, /* size */ 32},
+            Field{"array<vec4<u32>, 3>", /* align */ 32, /* size */ 48},
+            Field{"array<vec4<u32>, 4>", /* align */ 32, /* size */ 64},
+            Field{"array<vec3<u32>, 4>", /* align */ 32, /* size */ 64}.Strided<12, 4>(),
 
-                // Array types with custom size
-                Field{"array<u32, 1>", /* align */ 4, /* size */ 4}
-                    .PaddedSize(128)
-                    .StorageBufferOnly(),
-                Field{"array<u32, 2>", /* align */ 4, /* size */ 8}
-                    .PaddedSize(128)
-                    .StorageBufferOnly(),
-                Field{"array<u32, 3>", /* align */ 4, /* size */ 12}
-                    .PaddedSize(128)
-                    .StorageBufferOnly(),
-                Field{"array<u32, 4>", /* align */ 4, /* size */ 16}
-                    .PaddedSize(128)
-                    .StorageBufferOnly(),
-                Field{"array<vec3<u32>, 4>", /* align */ 16, /* size */ 64}
-                    .PaddedSize(128)
-                    .Strided<12, 4>(),
-            });
+            // Array types with custom size
+            Field{"array<u32, 1>", /* align */ 4, /* size */ 4}.PaddedSize(128).StorageBufferOnly(),
+            Field{"array<u32, 2>", /* align */ 4, /* size */ 8}.PaddedSize(128).StorageBufferOnly(),
+            Field{"array<u32, 3>", /* align */ 4, /* size */ 12}
+                .PaddedSize(128)
+                .StorageBufferOnly(),
+            Field{"array<u32, 4>", /* align */ 4, /* size */ 16}
+                .PaddedSize(128)
+                .StorageBufferOnly(),
+            Field{"array<vec3<u32>, 4>", /* align */ 16, /* size */ 64}
+                .PaddedSize(128)
+                .Strided<12, 4>(),
+        });
 
-        std::vector<ComputeLayoutMemoryBufferTestParams> filtered;
-        for (auto param : params) {
-            if (param.mStorageClass != StorageClass::Storage && param.mField.storage_buffer_only) {
-                continue;
-            }
-            filtered.emplace_back(param);
+    std::vector<ComputeLayoutMemoryBufferTestParams> filtered;
+    for (auto param : params) {
+        if (param.mStorageClass != StorageClass::Storage && param.mField.storage_buffer_only) {
+            continue;
         }
-        return filtered;
+        filtered.emplace_back(param);
     }
+    return filtered;
+}
 
-    INSTANTIATE_TEST_SUITE_P(
-        ,
-        ComputeLayoutMemoryBufferTests,
-        ::testing::ValuesIn(GenerateParams()),
-        DawnTestBase::PrintToStringParamName("ComputeLayoutMemoryBufferTests"));
-    GTEST_ALLOW_UNINSTANTIATED_PARAMETERIZED_TEST(ComputeLayoutMemoryBufferTests);
+INSTANTIATE_TEST_SUITE_P(,
+                         ComputeLayoutMemoryBufferTests,
+                         ::testing::ValuesIn(GenerateParams()),
+                         DawnTestBase::PrintToStringParamName("ComputeLayoutMemoryBufferTests"));
+GTEST_ALLOW_UNINSTANTIATED_PARAMETERIZED_TEST(ComputeLayoutMemoryBufferTests);
 
 }  // namespace
diff --git a/src/dawn/tests/end2end/CopyTests.cpp b/src/dawn/tests/end2end/CopyTests.cpp
index b789156..843baec 100644
--- a/src/dawn/tests/end2end/CopyTests.cpp
+++ b/src/dawn/tests/end2end/CopyTests.cpp
@@ -16,9 +16,9 @@
 #include <array>
 #include <vector>
 
-#include "dawn/tests/DawnTest.h"
 #include "dawn/common/Constants.h"
 #include "dawn/common/Math.h"
+#include "dawn/tests/DawnTest.h"
 #include "dawn/utils/TestUtils.h"
 #include "dawn/utils/TextureUtils.h"
 #include "dawn/utils/WGPUHelpers.h"
@@ -325,13 +325,13 @@
 };
 
 namespace {
-    // The CopyTests Texture to Texture in this class will validate both CopyTextureToTexture and
-    // CopyTextureToTextureInternal.
-    using UsageCopySrc = bool;
-    DAWN_TEST_PARAM_STRUCT(CopyTestsParams, UsageCopySrc);
+// The CopyTests Texture to Texture in this class will validate both CopyTextureToTexture and
+// CopyTextureToTextureInternal.
+using UsageCopySrc = bool;
+DAWN_TEST_PARAM_STRUCT(CopyTestsParams, UsageCopySrc);
 
-    using SrcColorFormat = wgpu::TextureFormat;
-    DAWN_TEST_PARAM_STRUCT(SrcColorFormatParams, SrcColorFormat);
+using SrcColorFormat = wgpu::TextureFormat;
+DAWN_TEST_PARAM_STRUCT(SrcColorFormatParams, SrcColorFormat);
 }  // namespace
 
 template <typename Parent>
diff --git a/src/dawn/tests/end2end/CopyTextureForBrowserTests.cpp b/src/dawn/tests/end2end/CopyTextureForBrowserTests.cpp
index d746bea..736b4bb 100644
--- a/src/dawn/tests/end2end/CopyTextureForBrowserTests.cpp
+++ b/src/dawn/tests/end2end/CopyTextureForBrowserTests.cpp
@@ -23,131 +23,131 @@
 #include "dawn/utils/WGPUHelpers.h"
 
 namespace {
-    static constexpr wgpu::TextureFormat kTextureFormat = wgpu::TextureFormat::RGBA8Unorm;
+static constexpr wgpu::TextureFormat kTextureFormat = wgpu::TextureFormat::RGBA8Unorm;
 
-    // Set default texture size to single line texture for color conversion tests.
-    static constexpr uint64_t kDefaultTextureWidth = 10;
-    static constexpr uint64_t kDefaultTextureHeight = 1;
+// Set default texture size to single line texture for color conversion tests.
+static constexpr uint64_t kDefaultTextureWidth = 10;
+static constexpr uint64_t kDefaultTextureHeight = 1;
 
-    enum class ColorSpace : uint32_t {
-        SRGB = 0x00,
-        DisplayP3 = 0x01,
-    };
+enum class ColorSpace : uint32_t {
+    SRGB = 0x00,
+    DisplayP3 = 0x01,
+};
 
-    using SrcFormat = wgpu::TextureFormat;
-    using DstFormat = wgpu::TextureFormat;
-    using SrcOrigin = wgpu::Origin3D;
-    using DstOrigin = wgpu::Origin3D;
-    using CopySize = wgpu::Extent3D;
-    using FlipY = bool;
-    using SrcColorSpace = ColorSpace;
-    using DstColorSpace = ColorSpace;
-    using SrcAlphaMode = wgpu::AlphaMode;
-    using DstAlphaMode = wgpu::AlphaMode;
+using SrcFormat = wgpu::TextureFormat;
+using DstFormat = wgpu::TextureFormat;
+using SrcOrigin = wgpu::Origin3D;
+using DstOrigin = wgpu::Origin3D;
+using CopySize = wgpu::Extent3D;
+using FlipY = bool;
+using SrcColorSpace = ColorSpace;
+using DstColorSpace = ColorSpace;
+using SrcAlphaMode = wgpu::AlphaMode;
+using DstAlphaMode = wgpu::AlphaMode;
 
-    std::ostream& operator<<(std::ostream& o, wgpu::Origin3D origin) {
-        o << origin.x << ", " << origin.y << ", " << origin.z;
-        return o;
-    }
+std::ostream& operator<<(std::ostream& o, wgpu::Origin3D origin) {
+    o << origin.x << ", " << origin.y << ", " << origin.z;
+    return o;
+}
 
-    std::ostream& operator<<(std::ostream& o, wgpu::Extent3D copySize) {
-        o << copySize.width << ", " << copySize.height << ", " << copySize.depthOrArrayLayers;
-        return o;
-    }
+std::ostream& operator<<(std::ostream& o, wgpu::Extent3D copySize) {
+    o << copySize.width << ", " << copySize.height << ", " << copySize.depthOrArrayLayers;
+    return o;
+}
 
-    std::ostream& operator<<(std::ostream& o, ColorSpace space) {
-        o << static_cast<uint32_t>(space);
-        return o;
-    }
+std::ostream& operator<<(std::ostream& o, ColorSpace space) {
+    o << static_cast<uint32_t>(space);
+    return o;
+}
 
-    DAWN_TEST_PARAM_STRUCT(AlphaTestParams, SrcAlphaMode, DstAlphaMode);
-    DAWN_TEST_PARAM_STRUCT(FormatTestParams, SrcFormat, DstFormat);
-    DAWN_TEST_PARAM_STRUCT(SubRectTestParams, SrcOrigin, DstOrigin, CopySize, FlipY);
-    DAWN_TEST_PARAM_STRUCT(ColorSpaceTestParams,
-                           DstFormat,
-                           SrcColorSpace,
-                           DstColorSpace,
-                           SrcAlphaMode,
-                           DstAlphaMode);
+DAWN_TEST_PARAM_STRUCT(AlphaTestParams, SrcAlphaMode, DstAlphaMode);
+DAWN_TEST_PARAM_STRUCT(FormatTestParams, SrcFormat, DstFormat);
+DAWN_TEST_PARAM_STRUCT(SubRectTestParams, SrcOrigin, DstOrigin, CopySize, FlipY);
+DAWN_TEST_PARAM_STRUCT(ColorSpaceTestParams,
+                       DstFormat,
+                       SrcColorSpace,
+                       DstColorSpace,
+                       SrcAlphaMode,
+                       DstAlphaMode);
 
-    // Color Space table
-    struct ColorSpaceInfo {
-        ColorSpace index;
-        std::array<float, 9> toXYZD50;    // 3x3 row major transform matrix
-        std::array<float, 9> fromXYZD50;  // inverse transform matrix of toXYZD50, precomputed
-        std::array<float, 7> gammaDecodingParams;  // Follow { A, B, G, E, epsilon, C, F } order
-        std::array<float, 7> gammaEncodingParams;  // inverse op of decoding, precomputed
-        bool isNonLinear;
-        bool isExtended;  // For extended color space.
-    };
-    static constexpr size_t kSupportedColorSpaceCount = 2;
-    static constexpr std::array<ColorSpaceInfo, kSupportedColorSpaceCount> ColorSpaceTable = {{
-        // sRGB,
-        // Got primary attributes from https://drafts.csswg.org/css-color/#predefined-sRGB
-        // Use matrices from
-        // http://www.brucelindbloom.com/index.html?Eqn_RGB_XYZ_Matrix.html#WSMatrices
-        // Get gamma-linear conversion params from https://en.wikipedia.org/wiki/SRGB with some
-        // mathematics.
-        {
-            //
-            ColorSpace::SRGB,
-            {{
-                //
-                0.4360747, 0.3850649, 0.1430804,  //
-                0.2225045, 0.7168786, 0.0606169,  //
-                0.0139322, 0.0971045, 0.7141733   //
-            }},
-
-            {{
-                //
-                3.1338561, -1.6168667, -0.4906146,  //
-                -0.9787684, 1.9161415, 0.0334540,   //
-                0.0719453, -0.2289914, 1.4052427    //
-            }},
-
-            // {G, A, B, C, D, E, F, }
-            {{2.4, 1.0 / 1.055, 0.055 / 1.055, 1.0 / 12.92, 4.045e-02, 0.0, 0.0}},
-
-            {{1.0 / 2.4, 1.13711 /*pow(1.055, 2.4)*/, 0.0, 12.92f, 3.1308e-03, -0.055, 0.0}},
-
-            true,
-            true  //
-        },
-
-        // Display P3, got primary attributes from
-        // https://www.w3.org/TR/css-color-4/#valdef-color-display-p3
-        // Use equations found in
-        // http://www.brucelindbloom.com/index.html?Eqn_RGB_XYZ_Matrix.html,
-        // Use Bradford method to do D65 to D50 transform.
-        // Get matrices with help of http://www.russellcottrell.com/photo/matrixCalculator.htm
-        // Gamma-linear conversion params is the same as Srgb.
-        {
-            //
-            ColorSpace::DisplayP3,
-            {{
-                //
-                0.5151114, 0.2919612, 0.1571274,  //
-                0.2411865, 0.6922440, 0.0665695,  //
-                -0.0010491, 0.0418832, 0.7842659  //
-            }},
-
-            {{
-                //
-                2.4039872, -0.9898498, -0.3976181,  //
-                -0.8422138, 1.7988188, 0.0160511,   //
-                0.0481937, -0.0973889, 1.2736887    //
-            }},
-
-            // {G, A, B, C, D, E, F, }
-            {{2.4, 1.0 / 1.055, 0.055 / 1.055, 1.0 / 12.92, 4.045e-02, 0.0, 0.0}},
-
-            {{1.0 / 2.4, 1.13711 /*pow(1.055, 2.4)*/, 0.0, 12.92f, 3.1308e-03, -0.055, 0.0}},
-
-            true,
-            false  //
-        }
+// Color Space table
+struct ColorSpaceInfo {
+    ColorSpace index;
+    std::array<float, 9> toXYZD50;             // 3x3 row major transform matrix
+    std::array<float, 9> fromXYZD50;           // inverse transform matrix of toXYZD50, precomputed
+    std::array<float, 7> gammaDecodingParams;  // Follow { A, B, G, E, epsilon, C, F } order
+    std::array<float, 7> gammaEncodingParams;  // inverse op of decoding, precomputed
+    bool isNonLinear;
+    bool isExtended;  // For extended color space.
+};
+static constexpr size_t kSupportedColorSpaceCount = 2;
+static constexpr std::array<ColorSpaceInfo, kSupportedColorSpaceCount> ColorSpaceTable = {{
+    // sRGB,
+    // Got primary attributes from https://drafts.csswg.org/css-color/#predefined-sRGB
+    // Use matrices from
+    // http://www.brucelindbloom.com/index.html?Eqn_RGB_XYZ_Matrix.html#WSMatrices
+    // Get gamma-linear conversion params from https://en.wikipedia.org/wiki/SRGB with some
+    // mathematics.
+    {
         //
-    }};
+        ColorSpace::SRGB,
+        {{
+            //
+            0.4360747, 0.3850649, 0.1430804,  //
+            0.2225045, 0.7168786, 0.0606169,  //
+            0.0139322, 0.0971045, 0.7141733   //
+        }},
+
+        {{
+            //
+            3.1338561, -1.6168667, -0.4906146,  //
+            -0.9787684, 1.9161415, 0.0334540,   //
+            0.0719453, -0.2289914, 1.4052427    //
+        }},
+
+        // {G, A, B, C, D, E, F, }
+        {{2.4, 1.0 / 1.055, 0.055 / 1.055, 1.0 / 12.92, 4.045e-02, 0.0, 0.0}},
+
+        {{1.0 / 2.4, 1.13711 /*pow(1.055, 2.4)*/, 0.0, 12.92f, 3.1308e-03, -0.055, 0.0}},
+
+        true,
+        true  //
+    },
+
+    // Display P3, got primary attributes from
+    // https://www.w3.org/TR/css-color-4/#valdef-color-display-p3
+    // Use equations found in
+    // http://www.brucelindbloom.com/index.html?Eqn_RGB_XYZ_Matrix.html,
+    // Use Bradford method to do D65 to D50 transform.
+    // Get matrices with help of http://www.russellcottrell.com/photo/matrixCalculator.htm
+    // Gamma-linear conversion params is the same as Srgb.
+    {
+        //
+        ColorSpace::DisplayP3,
+        {{
+            //
+            0.5151114, 0.2919612, 0.1571274,  //
+            0.2411865, 0.6922440, 0.0665695,  //
+            -0.0010491, 0.0418832, 0.7842659  //
+        }},
+
+        {{
+            //
+            2.4039872, -0.9898498, -0.3976181,  //
+            -0.8422138, 1.7988188, 0.0160511,   //
+            0.0481937, -0.0973889, 1.2736887    //
+        }},
+
+        // {G, A, B, C, D, E, F, }
+        {{2.4, 1.0 / 1.055, 0.055 / 1.055, 1.0 / 12.92, 4.045e-02, 0.0, 0.0}},
+
+        {{1.0 / 2.4, 1.13711 /*pow(1.055, 2.4)*/, 0.0, 12.92f, 3.1308e-03, -0.055, 0.0}},
+
+        true,
+        false  //
+    }
+    //
+}};
 }  // anonymous namespace
 
 template <typename Parent>
diff --git a/src/dawn/tests/end2end/CreatePipelineAsyncTests.cpp b/src/dawn/tests/end2end/CreatePipelineAsyncTests.cpp
index bfec892..6b95755 100644
--- a/src/dawn/tests/end2end/CreatePipelineAsyncTests.cpp
+++ b/src/dawn/tests/end2end/CreatePipelineAsyncTests.cpp
@@ -19,12 +19,12 @@
 #include "dawn/utils/WGPUHelpers.h"
 
 namespace {
-    struct CreatePipelineAsyncTask {
-        wgpu::ComputePipeline computePipeline = nullptr;
-        wgpu::RenderPipeline renderPipeline = nullptr;
-        bool isCompleted = false;
-        std::string message;
-    };
+struct CreatePipelineAsyncTask {
+    wgpu::ComputePipeline computePipeline = nullptr;
+    wgpu::RenderPipeline renderPipeline = nullptr;
+    bool isCompleted = false;
+    std::string message;
+};
 }  // anonymous namespace
 
 class CreatePipelineAsyncTest : public DawnTest {
@@ -65,9 +65,7 @@
         EXPECT_BUFFER_U32_EQ(kExpected, ssbo, 0);
     }
 
-    void ValidateCreateComputePipelineAsync() {
-        ValidateCreateComputePipelineAsync(&task);
-    }
+    void ValidateCreateComputePipelineAsync() { ValidateCreateComputePipelineAsync(&task); }
 
     void ValidateCreateRenderPipelineAsync(CreatePipelineAsyncTask* currentTask) {
         constexpr wgpu::TextureFormat kRenderAttachmentFormat = wgpu::TextureFormat::RGBA8Unorm;
@@ -106,9 +104,7 @@
         EXPECT_PIXEL_RGBA8_EQ(RGBA8(0, 255, 0, 255), outputTexture, 0, 0);
     }
 
-    void ValidateCreateRenderPipelineAsync() {
-        ValidateCreateRenderPipelineAsync(&task);
-    }
+    void ValidateCreateRenderPipelineAsync() { ValidateCreateRenderPipelineAsync(&task); }
 
     void DoCreateRenderPipelineAsync(
         const utils::ComboRenderPipelineDescriptor& renderPipelineDescriptor) {
diff --git a/src/dawn/tests/end2end/D3D12CachingTests.cpp b/src/dawn/tests/end2end/D3D12CachingTests.cpp
index 7987879..c895a53 100644
--- a/src/dawn/tests/end2end/D3D12CachingTests.cpp
+++ b/src/dawn/tests/end2end/D3D12CachingTests.cpp
@@ -22,7 +22,7 @@
 #include "dawn/utils/WGPUHelpers.h"
 
 namespace {
-    using ::testing::NiceMock;
+using ::testing::NiceMock;
 }  // namespace
 
 class D3D12CachingTests : public DawnTest {
diff --git a/src/dawn/tests/end2end/D3D12ResourceWrappingTests.cpp b/src/dawn/tests/end2end/D3D12ResourceWrappingTests.cpp
index 880e34b..ce81ec2 100644
--- a/src/dawn/tests/end2end/D3D12ResourceWrappingTests.cpp
+++ b/src/dawn/tests/end2end/D3D12ResourceWrappingTests.cpp
@@ -30,127 +30,127 @@
 
 namespace {
 
-    using dawn::native::d3d12::kDXGIKeyedMutexAcquireReleaseKey;
+using dawn::native::d3d12::kDXGIKeyedMutexAcquireReleaseKey;
 
-    class D3D12ResourceTestBase : public DawnTest {
-      protected:
-        std::vector<wgpu::FeatureName> GetRequiredFeatures() override {
-            return {wgpu::FeatureName::DawnInternalUsages};
+class D3D12ResourceTestBase : public DawnTest {
+  protected:
+    std::vector<wgpu::FeatureName> GetRequiredFeatures() override {
+        return {wgpu::FeatureName::DawnInternalUsages};
+    }
+
+  public:
+    void SetUp() override {
+        DawnTest::SetUp();
+        if (UsesWire()) {
+            return;
         }
 
-      public:
-        void SetUp() override {
-            DawnTest::SetUp();
-            if (UsesWire()) {
-                return;
-            }
+        // Create the D3D11 device/contexts that will be used in subsequent tests
+        ComPtr<ID3D12Device> d3d12Device = dawn::native::d3d12::GetD3D12Device(device.Get());
 
-            // Create the D3D11 device/contexts that will be used in subsequent tests
-            ComPtr<ID3D12Device> d3d12Device = dawn::native::d3d12::GetD3D12Device(device.Get());
+        const LUID adapterLuid = d3d12Device->GetAdapterLuid();
 
-            const LUID adapterLuid = d3d12Device->GetAdapterLuid();
+        ComPtr<IDXGIFactory4> dxgiFactory;
+        HRESULT hr = ::CreateDXGIFactory2(0, IID_PPV_ARGS(&dxgiFactory));
+        ASSERT_EQ(hr, S_OK);
 
-            ComPtr<IDXGIFactory4> dxgiFactory;
-            HRESULT hr = ::CreateDXGIFactory2(0, IID_PPV_ARGS(&dxgiFactory));
-            ASSERT_EQ(hr, S_OK);
+        ComPtr<IDXGIAdapter> dxgiAdapter;
+        hr = dxgiFactory->EnumAdapterByLuid(adapterLuid, IID_PPV_ARGS(&dxgiAdapter));
+        ASSERT_EQ(hr, S_OK);
 
-            ComPtr<IDXGIAdapter> dxgiAdapter;
-            hr = dxgiFactory->EnumAdapterByLuid(adapterLuid, IID_PPV_ARGS(&dxgiAdapter));
-            ASSERT_EQ(hr, S_OK);
+        ComPtr<ID3D11Device> d3d11Device;
+        D3D_FEATURE_LEVEL d3dFeatureLevel;
+        ComPtr<ID3D11DeviceContext> d3d11DeviceContext;
+        hr = ::D3D11CreateDevice(dxgiAdapter.Get(), D3D_DRIVER_TYPE_UNKNOWN, nullptr, 0, nullptr, 0,
+                                 D3D11_SDK_VERSION, &d3d11Device, &d3dFeatureLevel,
+                                 &d3d11DeviceContext);
+        ASSERT_EQ(hr, S_OK);
 
-            ComPtr<ID3D11Device> d3d11Device;
-            D3D_FEATURE_LEVEL d3dFeatureLevel;
-            ComPtr<ID3D11DeviceContext> d3d11DeviceContext;
-            hr = ::D3D11CreateDevice(dxgiAdapter.Get(), D3D_DRIVER_TYPE_UNKNOWN, nullptr, 0,
-                                     nullptr, 0, D3D11_SDK_VERSION, &d3d11Device, &d3dFeatureLevel,
-                                     &d3d11DeviceContext);
-            ASSERT_EQ(hr, S_OK);
+        mD3d11Device = std::move(d3d11Device);
+        mD3d11DeviceContext = std::move(d3d11DeviceContext);
 
-            mD3d11Device = std::move(d3d11Device);
-            mD3d11DeviceContext = std::move(d3d11DeviceContext);
+        baseDawnDescriptor.dimension = wgpu::TextureDimension::e2D;
+        baseDawnDescriptor.format = wgpu::TextureFormat::RGBA8Unorm;
+        baseDawnDescriptor.size = {kTestWidth, kTestHeight, 1};
+        baseDawnDescriptor.sampleCount = 1;
+        baseDawnDescriptor.mipLevelCount = 1;
+        baseDawnDescriptor.usage =
+            wgpu::TextureUsage::TextureBinding | wgpu::TextureUsage::CopySrc |
+            wgpu::TextureUsage::RenderAttachment | wgpu::TextureUsage::CopyDst;
 
-            baseDawnDescriptor.dimension = wgpu::TextureDimension::e2D;
-            baseDawnDescriptor.format = wgpu::TextureFormat::RGBA8Unorm;
-            baseDawnDescriptor.size = {kTestWidth, kTestHeight, 1};
-            baseDawnDescriptor.sampleCount = 1;
-            baseDawnDescriptor.mipLevelCount = 1;
-            baseDawnDescriptor.usage =
-                wgpu::TextureUsage::TextureBinding | wgpu::TextureUsage::CopySrc |
-                wgpu::TextureUsage::RenderAttachment | wgpu::TextureUsage::CopyDst;
+        baseD3dDescriptor.Width = kTestWidth;
+        baseD3dDescriptor.Height = kTestHeight;
+        baseD3dDescriptor.MipLevels = 1;
+        baseD3dDescriptor.ArraySize = 1;
+        baseD3dDescriptor.Format = DXGI_FORMAT_R8G8B8A8_UNORM;
+        baseD3dDescriptor.SampleDesc.Count = 1;
+        baseD3dDescriptor.SampleDesc.Quality = 0;
+        baseD3dDescriptor.Usage = D3D11_USAGE_DEFAULT;
+        baseD3dDescriptor.BindFlags = D3D11_BIND_SHADER_RESOURCE | D3D11_BIND_RENDER_TARGET;
+        baseD3dDescriptor.CPUAccessFlags = 0;
+        baseD3dDescriptor.MiscFlags =
+            D3D11_RESOURCE_MISC_SHARED_NTHANDLE | D3D11_RESOURCE_MISC_SHARED_KEYEDMUTEX;
+    }
 
-            baseD3dDescriptor.Width = kTestWidth;
-            baseD3dDescriptor.Height = kTestHeight;
-            baseD3dDescriptor.MipLevels = 1;
-            baseD3dDescriptor.ArraySize = 1;
-            baseD3dDescriptor.Format = DXGI_FORMAT_R8G8B8A8_UNORM;
-            baseD3dDescriptor.SampleDesc.Count = 1;
-            baseD3dDescriptor.SampleDesc.Quality = 0;
-            baseD3dDescriptor.Usage = D3D11_USAGE_DEFAULT;
-            baseD3dDescriptor.BindFlags = D3D11_BIND_SHADER_RESOURCE | D3D11_BIND_RENDER_TARGET;
-            baseD3dDescriptor.CPUAccessFlags = 0;
-            baseD3dDescriptor.MiscFlags =
-                D3D11_RESOURCE_MISC_SHARED_NTHANDLE | D3D11_RESOURCE_MISC_SHARED_KEYEDMUTEX;
+  protected:
+    void WrapSharedHandle(
+        const wgpu::TextureDescriptor* dawnDesc,
+        const D3D11_TEXTURE2D_DESC* baseD3dDescriptor,
+        wgpu::Texture* dawnTexture,
+        ID3D11Texture2D** d3d11TextureOut,
+        std::unique_ptr<dawn::native::d3d12::ExternalImageDXGI>* externalImageOut = nullptr) const {
+        ComPtr<ID3D11Texture2D> d3d11Texture;
+        HRESULT hr = mD3d11Device->CreateTexture2D(baseD3dDescriptor, nullptr, &d3d11Texture);
+        ASSERT_EQ(hr, S_OK);
+
+        ComPtr<IDXGIResource1> dxgiResource;
+        hr = d3d11Texture.As(&dxgiResource);
+        ASSERT_EQ(hr, S_OK);
+
+        HANDLE sharedHandle;
+        hr = dxgiResource->CreateSharedHandle(
+            nullptr, DXGI_SHARED_RESOURCE_READ | DXGI_SHARED_RESOURCE_WRITE, nullptr,
+            &sharedHandle);
+        ASSERT_EQ(hr, S_OK);
+
+        dawn::native::d3d12::ExternalImageDescriptorDXGISharedHandle externalImageDesc;
+        externalImageDesc.cTextureDescriptor =
+            reinterpret_cast<const WGPUTextureDescriptor*>(dawnDesc);
+        externalImageDesc.sharedHandle = sharedHandle;
+
+        std::unique_ptr<dawn::native::d3d12::ExternalImageDXGI> externalImage =
+            dawn::native::d3d12::ExternalImageDXGI::Create(device.Get(), &externalImageDesc);
+
+        // Now that we've created all of our resources, we can close the handle
+        // since we no longer need it.
+        ::CloseHandle(sharedHandle);
+
+        // Cannot access a non-existent external image (ex. validation error).
+        if (externalImage == nullptr) {
+            return;
         }
 
-      protected:
-        void WrapSharedHandle(const wgpu::TextureDescriptor* dawnDesc,
-                              const D3D11_TEXTURE2D_DESC* baseD3dDescriptor,
-                              wgpu::Texture* dawnTexture,
-                              ID3D11Texture2D** d3d11TextureOut,
-                              std::unique_ptr<dawn::native::d3d12::ExternalImageDXGI>*
-                                  externalImageOut = nullptr) const {
-            ComPtr<ID3D11Texture2D> d3d11Texture;
-            HRESULT hr = mD3d11Device->CreateTexture2D(baseD3dDescriptor, nullptr, &d3d11Texture);
-            ASSERT_EQ(hr, S_OK);
+        dawn::native::d3d12::ExternalImageAccessDescriptorDXGIKeyedMutex externalAccessDesc;
+        externalAccessDesc.usage = static_cast<WGPUTextureUsageFlags>(dawnDesc->usage);
 
-            ComPtr<IDXGIResource1> dxgiResource;
-            hr = d3d11Texture.As(&dxgiResource);
-            ASSERT_EQ(hr, S_OK);
+        *dawnTexture = wgpu::Texture::Acquire(
+            externalImage->ProduceTexture(device.Get(), &externalAccessDesc));
+        *d3d11TextureOut = d3d11Texture.Detach();
 
-            HANDLE sharedHandle;
-            hr = dxgiResource->CreateSharedHandle(
-                nullptr, DXGI_SHARED_RESOURCE_READ | DXGI_SHARED_RESOURCE_WRITE, nullptr,
-                &sharedHandle);
-            ASSERT_EQ(hr, S_OK);
-
-            dawn::native::d3d12::ExternalImageDescriptorDXGISharedHandle externalImageDesc;
-            externalImageDesc.cTextureDescriptor =
-                reinterpret_cast<const WGPUTextureDescriptor*>(dawnDesc);
-            externalImageDesc.sharedHandle = sharedHandle;
-
-            std::unique_ptr<dawn::native::d3d12::ExternalImageDXGI> externalImage =
-                dawn::native::d3d12::ExternalImageDXGI::Create(device.Get(), &externalImageDesc);
-
-            // Now that we've created all of our resources, we can close the handle
-            // since we no longer need it.
-            ::CloseHandle(sharedHandle);
-
-            // Cannot access a non-existent external image (ex. validation error).
-            if (externalImage == nullptr) {
-                return;
-            }
-
-            dawn::native::d3d12::ExternalImageAccessDescriptorDXGIKeyedMutex externalAccessDesc;
-            externalAccessDesc.usage = static_cast<WGPUTextureUsageFlags>(dawnDesc->usage);
-
-            *dawnTexture = wgpu::Texture::Acquire(
-                externalImage->ProduceTexture(device.Get(), &externalAccessDesc));
-            *d3d11TextureOut = d3d11Texture.Detach();
-
-            if (externalImageOut != nullptr) {
-                *externalImageOut = std::move(externalImage);
-            }
+        if (externalImageOut != nullptr) {
+            *externalImageOut = std::move(externalImage);
         }
+    }
 
-        static constexpr size_t kTestWidth = 10;
-        static constexpr size_t kTestHeight = 10;
+    static constexpr size_t kTestWidth = 10;
+    static constexpr size_t kTestHeight = 10;
 
-        ComPtr<ID3D11Device> mD3d11Device;
-        ComPtr<ID3D11DeviceContext> mD3d11DeviceContext;
+    ComPtr<ID3D11Device> mD3d11Device;
+    ComPtr<ID3D11DeviceContext> mD3d11DeviceContext;
 
-        D3D11_TEXTURE2D_DESC baseD3dDescriptor;
-        wgpu::TextureDescriptor baseDawnDescriptor;
-    };
+    D3D11_TEXTURE2D_DESC baseD3dDescriptor;
+    wgpu::TextureDescriptor baseDawnDescriptor;
+};
 
 }  // anonymous namespace
 
diff --git a/src/dawn/tests/end2end/DepthStencilCopyTests.cpp b/src/dawn/tests/end2end/DepthStencilCopyTests.cpp
index 9dd62bd..aef6c3c 100644
--- a/src/dawn/tests/end2end/DepthStencilCopyTests.cpp
+++ b/src/dawn/tests/end2end/DepthStencilCopyTests.cpp
@@ -16,27 +16,27 @@
 #include <string>
 #include <vector>
 
-#include "dawn/tests/DawnTest.h"
 #include "dawn/common/Constants.h"
 #include "dawn/common/Math.h"
+#include "dawn/tests/DawnTest.h"
 #include "dawn/utils/ComboRenderPipelineDescriptor.h"
 #include "dawn/utils/TestUtils.h"
 #include "dawn/utils/TextureUtils.h"
 #include "dawn/utils/WGPUHelpers.h"
 
 namespace {
-    using TextureFormat = wgpu::TextureFormat;
-    DAWN_TEST_PARAM_STRUCT(DepthStencilCopyTestParams, TextureFormat);
+using TextureFormat = wgpu::TextureFormat;
+DAWN_TEST_PARAM_STRUCT(DepthStencilCopyTestParams, TextureFormat);
 
-    constexpr std::array<wgpu::TextureFormat, 3> kValidDepthCopyTextureFormats = {
-        wgpu::TextureFormat::Depth16Unorm,
-        wgpu::TextureFormat::Depth32Float,
-        wgpu::TextureFormat::Depth32FloatStencil8,
-    };
+constexpr std::array<wgpu::TextureFormat, 3> kValidDepthCopyTextureFormats = {
+    wgpu::TextureFormat::Depth16Unorm,
+    wgpu::TextureFormat::Depth32Float,
+    wgpu::TextureFormat::Depth32FloatStencil8,
+};
 
-    constexpr std::array<wgpu::TextureFormat, 1> kValidDepthCopyFromBufferFormats = {
-        wgpu::TextureFormat::Depth16Unorm,
-    };
+constexpr std::array<wgpu::TextureFormat, 1> kValidDepthCopyFromBufferFormats = {
+    wgpu::TextureFormat::Depth16Unorm,
+};
 }  // namespace
 
 class DepthStencilCopyTests : public DawnTestWithParams<DepthStencilCopyTestParams> {
diff --git a/src/dawn/tests/end2end/DepthStencilLoadOpTests.cpp b/src/dawn/tests/end2end/DepthStencilLoadOpTests.cpp
index e60102e..9f27263 100644
--- a/src/dawn/tests/end2end/DepthStencilLoadOpTests.cpp
+++ b/src/dawn/tests/end2end/DepthStencilLoadOpTests.cpp
@@ -21,165 +21,164 @@
 
 namespace {
 
-    using Format = wgpu::TextureFormat;
-    enum class Check {
-        CopyStencil,
-        StencilTest,
-        CopyDepth,
-        DepthTest,
-        SampleDepth,
-    };
+using Format = wgpu::TextureFormat;
+enum class Check {
+    CopyStencil,
+    StencilTest,
+    CopyDepth,
+    DepthTest,
+    SampleDepth,
+};
 
-    std::ostream& operator<<(std::ostream& o, Check check) {
-        switch (check) {
-            case Check::CopyStencil:
-                o << "CopyStencil";
-                break;
-            case Check::StencilTest:
-                o << "StencilTest";
-                break;
-            case Check::CopyDepth:
-                o << "CopyDepth";
-                break;
-            case Check::DepthTest:
-                o << "DepthTest";
-                break;
-            case Check::SampleDepth:
-                o << "SampleDepth";
-                break;
+std::ostream& operator<<(std::ostream& o, Check check) {
+    switch (check) {
+        case Check::CopyStencil:
+            o << "CopyStencil";
+            break;
+        case Check::StencilTest:
+            o << "StencilTest";
+            break;
+        case Check::CopyDepth:
+            o << "CopyDepth";
+            break;
+        case Check::DepthTest:
+            o << "DepthTest";
+            break;
+        case Check::SampleDepth:
+            o << "SampleDepth";
+            break;
+    }
+    return o;
+}
+
+DAWN_TEST_PARAM_STRUCT(DepthStencilLoadOpTestParams, Format, Check);
+
+constexpr static uint32_t kRTSize = 16;
+constexpr uint32_t kMipLevelCount = 2u;
+constexpr std::array<float, kMipLevelCount> kDepthValues = {0.125f, 0.875f};
+constexpr std::array<uint16_t, kMipLevelCount> kU16DepthValues = {8192u, 57343u};
+constexpr std::array<uint8_t, kMipLevelCount> kStencilValues = {7u, 3u};
+
+class DepthStencilLoadOpTests : public DawnTestWithParams<DepthStencilLoadOpTestParams> {
+  protected:
+    void SetUp() override {
+        DawnTestWithParams<DepthStencilLoadOpTestParams>::SetUp();
+
+        DAWN_TEST_UNSUPPORTED_IF(!mIsFormatSupported);
+
+        // Readback of Depth/Stencil textures not fully supported on GL right now.
+        // Also depends on glTextureView which is not supported on ES.
+        DAWN_SUPPRESS_TEST_IF(IsOpenGL() || IsOpenGLES());
+
+        wgpu::TextureDescriptor descriptor;
+        descriptor.size = {kRTSize, kRTSize};
+        descriptor.format = GetParam().mFormat;
+        descriptor.mipLevelCount = kMipLevelCount;
+        descriptor.usage = wgpu::TextureUsage::RenderAttachment | wgpu::TextureUsage::CopySrc |
+                           wgpu::TextureUsage::TextureBinding;
+
+        texture = device.CreateTexture(&descriptor);
+
+        wgpu::TextureViewDescriptor textureViewDesc = {};
+        textureViewDesc.mipLevelCount = 1;
+
+        for (uint32_t mipLevel = 0; mipLevel < kMipLevelCount; ++mipLevel) {
+            textureViewDesc.baseMipLevel = mipLevel;
+            textureViews[mipLevel] = texture.CreateView(&textureViewDesc);
+
+            utils::ComboRenderPassDescriptor renderPassDescriptor({}, textureViews[mipLevel]);
+            renderPassDescriptor.UnsetDepthStencilLoadStoreOpsForFormat(GetParam().mFormat);
+            renderPassDescriptor.cDepthStencilAttachmentInfo.depthClearValue =
+                kDepthValues[mipLevel];
+            renderPassDescriptor.cDepthStencilAttachmentInfo.stencilClearValue =
+                kStencilValues[mipLevel];
+            renderPassDescriptors.push_back(renderPassDescriptor);
         }
-        return o;
     }
 
-    DAWN_TEST_PARAM_STRUCT(DepthStencilLoadOpTestParams, Format, Check);
-
-    constexpr static uint32_t kRTSize = 16;
-    constexpr uint32_t kMipLevelCount = 2u;
-    constexpr std::array<float, kMipLevelCount> kDepthValues = {0.125f, 0.875f};
-    constexpr std::array<uint16_t, kMipLevelCount> kU16DepthValues = {8192u, 57343u};
-    constexpr std::array<uint8_t, kMipLevelCount> kStencilValues = {7u, 3u};
-
-    class DepthStencilLoadOpTests : public DawnTestWithParams<DepthStencilLoadOpTestParams> {
-      protected:
-        void SetUp() override {
-            DawnTestWithParams<DepthStencilLoadOpTestParams>::SetUp();
-
-            DAWN_TEST_UNSUPPORTED_IF(!mIsFormatSupported);
-
-            // Readback of Depth/Stencil textures not fully supported on GL right now.
-            // Also depends on glTextureView which is not supported on ES.
-            DAWN_SUPPRESS_TEST_IF(IsOpenGL() || IsOpenGLES());
-
-            wgpu::TextureDescriptor descriptor;
-            descriptor.size = {kRTSize, kRTSize};
-            descriptor.format = GetParam().mFormat;
-            descriptor.mipLevelCount = kMipLevelCount;
-            descriptor.usage = wgpu::TextureUsage::RenderAttachment | wgpu::TextureUsage::CopySrc |
-                               wgpu::TextureUsage::TextureBinding;
-
-            texture = device.CreateTexture(&descriptor);
-
-            wgpu::TextureViewDescriptor textureViewDesc = {};
-            textureViewDesc.mipLevelCount = 1;
-
-            for (uint32_t mipLevel = 0; mipLevel < kMipLevelCount; ++mipLevel) {
-                textureViewDesc.baseMipLevel = mipLevel;
-                textureViews[mipLevel] = texture.CreateView(&textureViewDesc);
-
-                utils::ComboRenderPassDescriptor renderPassDescriptor({}, textureViews[mipLevel]);
-                renderPassDescriptor.UnsetDepthStencilLoadStoreOpsForFormat(GetParam().mFormat);
-                renderPassDescriptor.cDepthStencilAttachmentInfo.depthClearValue =
-                    kDepthValues[mipLevel];
-                renderPassDescriptor.cDepthStencilAttachmentInfo.stencilClearValue =
-                    kStencilValues[mipLevel];
-                renderPassDescriptors.push_back(renderPassDescriptor);
-            }
-        }
-
-        std::vector<wgpu::FeatureName> GetRequiredFeatures() override {
-            switch (GetParam().mFormat) {
-                case wgpu::TextureFormat::Depth24UnormStencil8:
-                    if (SupportsFeatures({wgpu::FeatureName::Depth24UnormStencil8})) {
-                        mIsFormatSupported = true;
-                        return {wgpu::FeatureName::Depth24UnormStencil8};
-                    }
-                    return {};
-                case wgpu::TextureFormat::Depth32FloatStencil8:
-                    if (SupportsFeatures({wgpu::FeatureName::Depth32FloatStencil8})) {
-                        mIsFormatSupported = true;
-                        return {wgpu::FeatureName::Depth32FloatStencil8};
-                    }
-                    return {};
-                default:
+    std::vector<wgpu::FeatureName> GetRequiredFeatures() override {
+        switch (GetParam().mFormat) {
+            case wgpu::TextureFormat::Depth24UnormStencil8:
+                if (SupportsFeatures({wgpu::FeatureName::Depth24UnormStencil8})) {
                     mIsFormatSupported = true;
-                    return {};
+                    return {wgpu::FeatureName::Depth24UnormStencil8};
+                }
+                return {};
+            case wgpu::TextureFormat::Depth32FloatStencil8:
+                if (SupportsFeatures({wgpu::FeatureName::Depth32FloatStencil8})) {
+                    mIsFormatSupported = true;
+                    return {wgpu::FeatureName::Depth32FloatStencil8};
+                }
+                return {};
+            default:
+                mIsFormatSupported = true;
+                return {};
+        }
+    }
+
+    void CheckMipLevel(uint32_t mipLevel) {
+        uint32_t mipSize = std::max(kRTSize >> mipLevel, 1u);
+
+        switch (GetParam().mCheck) {
+            case Check::SampleDepth: {
+                std::vector<float> expectedDepth(mipSize * mipSize, kDepthValues[mipLevel]);
+                ExpectSampledDepthData(
+                    texture, mipSize, mipSize, 0, mipLevel,
+                    new detail::ExpectEq<float>(expectedDepth.data(), expectedDepth.size(), 0.0001))
+                    << "sample depth mip " << mipLevel;
+                break;
+            }
+
+            case Check::CopyDepth: {
+                if (GetParam().mFormat == wgpu::TextureFormat::Depth16Unorm) {
+                    std::vector<uint16_t> expectedDepth(mipSize * mipSize,
+                                                        kU16DepthValues[mipLevel]);
+                    EXPECT_TEXTURE_EQ(expectedDepth.data(), texture, {0, 0}, {mipSize, mipSize},
+                                      mipLevel, wgpu::TextureAspect::DepthOnly)
+                        << "copy depth mip " << mipLevel;
+                } else {
+                    std::vector<float> expectedDepth(mipSize * mipSize, kDepthValues[mipLevel]);
+                    EXPECT_TEXTURE_EQ(expectedDepth.data(), texture, {0, 0}, {mipSize, mipSize},
+                                      mipLevel, wgpu::TextureAspect::DepthOnly)
+                        << "copy depth mip " << mipLevel;
+                }
+
+                break;
+            }
+
+            case Check::CopyStencil: {
+                std::vector<uint8_t> expectedStencil(mipSize * mipSize, kStencilValues[mipLevel]);
+                EXPECT_TEXTURE_EQ(expectedStencil.data(), texture, {0, 0}, {mipSize, mipSize},
+                                  mipLevel, wgpu::TextureAspect::StencilOnly)
+                    << "copy stencil mip " << mipLevel;
+                break;
+            }
+
+            case Check::DepthTest: {
+                std::vector<float> expectedDepth(mipSize * mipSize, kDepthValues[mipLevel]);
+                ExpectAttachmentDepthTestData(texture, GetParam().mFormat, mipSize, mipSize, 0,
+                                              mipLevel, expectedDepth)
+                    << "depth test mip " << mipLevel;
+                break;
+            }
+
+            case Check::StencilTest: {
+                ExpectAttachmentStencilTestData(texture, GetParam().mFormat, mipSize, mipSize, 0,
+                                                mipLevel, kStencilValues[mipLevel])
+                    << "stencil test mip " << mipLevel;
+                break;
             }
         }
+    }
 
-        void CheckMipLevel(uint32_t mipLevel) {
-            uint32_t mipSize = std::max(kRTSize >> mipLevel, 1u);
+    wgpu::Texture texture;
+    std::array<wgpu::TextureView, kMipLevelCount> textureViews;
+    // Vector instead of array because there is no default constructor.
+    std::vector<utils::ComboRenderPassDescriptor> renderPassDescriptors;
 
-            switch (GetParam().mCheck) {
-                case Check::SampleDepth: {
-                    std::vector<float> expectedDepth(mipSize * mipSize, kDepthValues[mipLevel]);
-                    ExpectSampledDepthData(texture, mipSize, mipSize, 0, mipLevel,
-                                           new detail::ExpectEq<float>(
-                                               expectedDepth.data(), expectedDepth.size(), 0.0001))
-                        << "sample depth mip " << mipLevel;
-                    break;
-                }
-
-                case Check::CopyDepth: {
-                    if (GetParam().mFormat == wgpu::TextureFormat::Depth16Unorm) {
-                        std::vector<uint16_t> expectedDepth(mipSize * mipSize,
-                                                            kU16DepthValues[mipLevel]);
-                        EXPECT_TEXTURE_EQ(expectedDepth.data(), texture, {0, 0}, {mipSize, mipSize},
-                                          mipLevel, wgpu::TextureAspect::DepthOnly)
-                            << "copy depth mip " << mipLevel;
-                    } else {
-                        std::vector<float> expectedDepth(mipSize * mipSize, kDepthValues[mipLevel]);
-                        EXPECT_TEXTURE_EQ(expectedDepth.data(), texture, {0, 0}, {mipSize, mipSize},
-                                          mipLevel, wgpu::TextureAspect::DepthOnly)
-                            << "copy depth mip " << mipLevel;
-                    }
-
-                    break;
-                }
-
-                case Check::CopyStencil: {
-                    std::vector<uint8_t> expectedStencil(mipSize * mipSize,
-                                                         kStencilValues[mipLevel]);
-                    EXPECT_TEXTURE_EQ(expectedStencil.data(), texture, {0, 0}, {mipSize, mipSize},
-                                      mipLevel, wgpu::TextureAspect::StencilOnly)
-                        << "copy stencil mip " << mipLevel;
-                    break;
-                }
-
-                case Check::DepthTest: {
-                    std::vector<float> expectedDepth(mipSize * mipSize, kDepthValues[mipLevel]);
-                    ExpectAttachmentDepthTestData(texture, GetParam().mFormat, mipSize, mipSize, 0,
-                                                  mipLevel, expectedDepth)
-                        << "depth test mip " << mipLevel;
-                    break;
-                }
-
-                case Check::StencilTest: {
-                    ExpectAttachmentStencilTestData(texture, GetParam().mFormat, mipSize, mipSize,
-                                                    0, mipLevel, kStencilValues[mipLevel])
-                        << "stencil test mip " << mipLevel;
-                    break;
-                }
-            }
-        }
-
-        wgpu::Texture texture;
-        std::array<wgpu::TextureView, kMipLevelCount> textureViews;
-        // Vector instead of array because there is no default constructor.
-        std::vector<utils::ComboRenderPassDescriptor> renderPassDescriptors;
-
-      private:
-        bool mIsFormatSupported = false;
-    };
+  private:
+    bool mIsFormatSupported = false;
+};
 
 }  // anonymous namespace
 
@@ -258,31 +257,31 @@
 
 namespace {
 
-    auto GenerateParams() {
-        auto params1 = MakeParamGenerator<DepthStencilLoadOpTestParams>(
-            {D3D12Backend(), D3D12Backend({}, {"use_d3d12_render_pass"}), MetalBackend(),
-             OpenGLBackend(), OpenGLESBackend(), VulkanBackend()},
-            {wgpu::TextureFormat::Depth32Float, wgpu::TextureFormat::Depth16Unorm},
-            {Check::CopyDepth, Check::DepthTest, Check::SampleDepth});
+auto GenerateParams() {
+    auto params1 = MakeParamGenerator<DepthStencilLoadOpTestParams>(
+        {D3D12Backend(), D3D12Backend({}, {"use_d3d12_render_pass"}), MetalBackend(),
+         OpenGLBackend(), OpenGLESBackend(), VulkanBackend()},
+        {wgpu::TextureFormat::Depth32Float, wgpu::TextureFormat::Depth16Unorm},
+        {Check::CopyDepth, Check::DepthTest, Check::SampleDepth});
 
-        auto params2 = MakeParamGenerator<DepthStencilLoadOpTestParams>(
-            {D3D12Backend(), D3D12Backend({}, {"use_d3d12_render_pass"}), MetalBackend(),
-             OpenGLBackend(), OpenGLESBackend(), VulkanBackend()},
-            {wgpu::TextureFormat::Depth24PlusStencil8, wgpu::TextureFormat::Depth24UnormStencil8,
-             wgpu::TextureFormat::Depth32FloatStencil8},
-            {Check::CopyStencil, Check::StencilTest, Check::DepthTest, Check::SampleDepth});
+    auto params2 = MakeParamGenerator<DepthStencilLoadOpTestParams>(
+        {D3D12Backend(), D3D12Backend({}, {"use_d3d12_render_pass"}), MetalBackend(),
+         OpenGLBackend(), OpenGLESBackend(), VulkanBackend()},
+        {wgpu::TextureFormat::Depth24PlusStencil8, wgpu::TextureFormat::Depth24UnormStencil8,
+         wgpu::TextureFormat::Depth32FloatStencil8},
+        {Check::CopyStencil, Check::StencilTest, Check::DepthTest, Check::SampleDepth});
 
-        std::vector<DepthStencilLoadOpTestParams> allParams;
-        allParams.insert(allParams.end(), params1.begin(), params1.end());
-        allParams.insert(allParams.end(), params2.begin(), params2.end());
+    std::vector<DepthStencilLoadOpTestParams> allParams;
+    allParams.insert(allParams.end(), params1.begin(), params1.end());
+    allParams.insert(allParams.end(), params2.begin(), params2.end());
 
-        return allParams;
-    }
+    return allParams;
+}
 
-    INSTANTIATE_TEST_SUITE_P(,
-                             DepthStencilLoadOpTests,
-                             ::testing::ValuesIn(GenerateParams()),
-                             DawnTestBase::PrintToStringParamName("DepthStencilLoadOpTests"));
-    GTEST_ALLOW_UNINSTANTIATED_PARAMETERIZED_TEST(DepthStencilLoadOpTests);
+INSTANTIATE_TEST_SUITE_P(,
+                         DepthStencilLoadOpTests,
+                         ::testing::ValuesIn(GenerateParams()),
+                         DawnTestBase::PrintToStringParamName("DepthStencilLoadOpTests"));
+GTEST_ALLOW_UNINSTANTIATED_PARAMETERIZED_TEST(DepthStencilLoadOpTests);
 
 }  // namespace
diff --git a/src/dawn/tests/end2end/DepthStencilSamplingTests.cpp b/src/dawn/tests/end2end/DepthStencilSamplingTests.cpp
index 4da042d..574573c 100644
--- a/src/dawn/tests/end2end/DepthStencilSamplingTests.cpp
+++ b/src/dawn/tests/end2end/DepthStencilSamplingTests.cpp
@@ -21,24 +21,24 @@
 #include "dawn/utils/WGPUHelpers.h"
 
 namespace {
-    using TextureFormat = wgpu::TextureFormat;
-    DAWN_TEST_PARAM_STRUCT(DepthStencilSamplingTestParams, TextureFormat);
+using TextureFormat = wgpu::TextureFormat;
+DAWN_TEST_PARAM_STRUCT(DepthStencilSamplingTestParams, TextureFormat);
 
-    constexpr wgpu::CompareFunction kCompareFunctions[] = {
-        wgpu::CompareFunction::Never,        wgpu::CompareFunction::Less,
-        wgpu::CompareFunction::LessEqual,    wgpu::CompareFunction::Greater,
-        wgpu::CompareFunction::GreaterEqual, wgpu::CompareFunction::Equal,
-        wgpu::CompareFunction::NotEqual,     wgpu::CompareFunction::Always,
-    };
+constexpr wgpu::CompareFunction kCompareFunctions[] = {
+    wgpu::CompareFunction::Never,        wgpu::CompareFunction::Less,
+    wgpu::CompareFunction::LessEqual,    wgpu::CompareFunction::Greater,
+    wgpu::CompareFunction::GreaterEqual, wgpu::CompareFunction::Equal,
+    wgpu::CompareFunction::NotEqual,     wgpu::CompareFunction::Always,
+};
 
-    // Test a "normal" ref value between 0 and 1; as well as negative and > 1 refs.
-    constexpr float kCompareRefs[] = {-0.1, 0.4, 1.2};
+// Test a "normal" ref value between 0 and 1; as well as negative and > 1 refs.
+constexpr float kCompareRefs[] = {-0.1, 0.4, 1.2};
 
-    // Test 0, below the ref, equal to, above the ref, and 1.
-    const std::vector<float> kNormalizedTextureValues = {0.0, 0.3, 0.4, 0.5, 1.0};
+// Test 0, below the ref, equal to, above the ref, and 1.
+const std::vector<float> kNormalizedTextureValues = {0.0, 0.3, 0.4, 0.5, 1.0};
 
-    // Test the limits, and some values in between.
-    const std::vector<uint32_t> kStencilValues = {0, 1, 38, 255};
+// Test the limits, and some values in between.
+const std::vector<uint32_t> kStencilValues = {0, 1, 38, 255};
 
 }  // anonymous namespace
 
@@ -426,8 +426,7 @@
         using StencilData = std::array<uint32_t, 4>;
 
       public:
-        explicit ExtraStencilComponentsExpectation(uint32_t expected) : mExpected(expected) {
-        }
+        explicit ExtraStencilComponentsExpectation(uint32_t expected) : mExpected(expected) {}
 
         ~ExtraStencilComponentsExpectation() override = default;
 
diff --git a/src/dawn/tests/end2end/DeviceInitializationTests.cpp b/src/dawn/tests/end2end/DeviceInitializationTests.cpp
index 0bd35bb..0c7e621 100644
--- a/src/dawn/tests/end2end/DeviceInitializationTests.cpp
+++ b/src/dawn/tests/end2end/DeviceInitializationTests.cpp
@@ -21,13 +21,9 @@
 #include "dawn/utils/WGPUHelpers.h"
 
 class DeviceInitializationTest : public testing::Test {
-    void SetUp() override {
-        dawnProcSetProcs(&dawn::native::GetProcs());
-    }
+    void SetUp() override { dawnProcSetProcs(&dawn::native::GetProcs()); }
 
-    void TearDown() override {
-        dawnProcSetProcs(nullptr);
-    }
+    void TearDown() override { dawnProcSetProcs(nullptr); }
 };
 
 // Test that device operations are still valid if the reference to the instance
diff --git a/src/dawn/tests/end2end/DeviceLostTests.cpp b/src/dawn/tests/end2end/DeviceLostTests.cpp
index 7dfff52..2ac47f6 100644
--- a/src/dawn/tests/end2end/DeviceLostTests.cpp
+++ b/src/dawn/tests/end2end/DeviceLostTests.cpp
@@ -16,11 +16,11 @@
 #include <memory>
 #include <string>
 
-#include "gmock/gmock.h"
 #include "dawn/tests/DawnTest.h"
 #include "dawn/tests/MockCallback.h"
 #include "dawn/utils/ComboRenderPipelineDescriptor.h"
 #include "dawn/utils/WGPUHelpers.h"
+#include "gmock/gmock.h"
 
 using testing::_;
 using testing::Exactly;
diff --git a/src/dawn/tests/end2end/DynamicBufferOffsetTests.cpp b/src/dawn/tests/end2end/DynamicBufferOffsetTests.cpp
index 4e2c444..23dee82 100644
--- a/src/dawn/tests/end2end/DynamicBufferOffsetTests.cpp
+++ b/src/dawn/tests/end2end/DynamicBufferOffsetTests.cpp
@@ -17,8 +17,8 @@
 #include <string>
 #include <vector>
 
-#include "dawn/tests/DawnTest.h"
 #include "dawn/common/Math.h"
+#include "dawn/tests/DawnTest.h"
 #include "dawn/utils/ComboRenderPipelineDescriptor.h"
 #include "dawn/utils/WGPUHelpers.h"
 
@@ -404,11 +404,11 @@
 }
 
 namespace {
-    using ReadBufferUsage = wgpu::BufferUsage;
-    using OOBRead = bool;
-    using OOBWrite = bool;
+using ReadBufferUsage = wgpu::BufferUsage;
+using OOBRead = bool;
+using OOBWrite = bool;
 
-    DAWN_TEST_PARAM_STRUCT(ClampedOOBDynamicBufferOffsetParams, ReadBufferUsage, OOBRead, OOBWrite);
+DAWN_TEST_PARAM_STRUCT(ClampedOOBDynamicBufferOffsetParams, ReadBufferUsage, OOBRead, OOBWrite);
 }  // anonymous namespace
 
 class ClampedOOBDynamicBufferOffsetTests
diff --git a/src/dawn/tests/end2end/ExternalTextureTests.cpp b/src/dawn/tests/end2end/ExternalTextureTests.cpp
index afe28f8..4215c77 100644
--- a/src/dawn/tests/end2end/ExternalTextureTests.cpp
+++ b/src/dawn/tests/end2end/ExternalTextureTests.cpp
@@ -18,30 +18,30 @@
 
 namespace {
 
-    wgpu::Texture Create2DTexture(wgpu::Device device,
-                                  uint32_t width,
-                                  uint32_t height,
-                                  wgpu::TextureFormat format,
-                                  wgpu::TextureUsage usage) {
-        wgpu::TextureDescriptor descriptor;
-        descriptor.dimension = wgpu::TextureDimension::e2D;
-        descriptor.size.width = width;
-        descriptor.size.height = height;
-        descriptor.size.depthOrArrayLayers = 1;
-        descriptor.sampleCount = 1;
-        descriptor.format = format;
-        descriptor.mipLevelCount = 1;
-        descriptor.usage = usage;
-        return device.CreateTexture(&descriptor);
-    }
+wgpu::Texture Create2DTexture(wgpu::Device device,
+                              uint32_t width,
+                              uint32_t height,
+                              wgpu::TextureFormat format,
+                              wgpu::TextureUsage usage) {
+    wgpu::TextureDescriptor descriptor;
+    descriptor.dimension = wgpu::TextureDimension::e2D;
+    descriptor.size.width = width;
+    descriptor.size.height = height;
+    descriptor.size.depthOrArrayLayers = 1;
+    descriptor.sampleCount = 1;
+    descriptor.format = format;
+    descriptor.mipLevelCount = 1;
+    descriptor.usage = usage;
+    return device.CreateTexture(&descriptor);
+}
 
-    class ExternalTextureTests : public DawnTest {
-      protected:
-        static constexpr uint32_t kWidth = 4;
-        static constexpr uint32_t kHeight = 4;
-        static constexpr wgpu::TextureFormat kFormat = wgpu::TextureFormat::RGBA8Unorm;
-        static constexpr wgpu::TextureUsage kSampledUsage = wgpu::TextureUsage::TextureBinding;
-    };
+class ExternalTextureTests : public DawnTest {
+  protected:
+    static constexpr uint32_t kWidth = 4;
+    static constexpr uint32_t kHeight = 4;
+    static constexpr wgpu::TextureFormat kFormat = wgpu::TextureFormat::RGBA8Unorm;
+    static constexpr wgpu::TextureUsage kSampledUsage = wgpu::TextureUsage::TextureBinding;
+};
 }  // anonymous namespace
 
 TEST_P(ExternalTextureTests, CreateExternalTextureSuccess) {
diff --git a/src/dawn/tests/end2end/FirstIndexOffsetTests.cpp b/src/dawn/tests/end2end/FirstIndexOffsetTests.cpp
index 9dc9e19..768db35 100644
--- a/src/dawn/tests/end2end/FirstIndexOffsetTests.cpp
+++ b/src/dawn/tests/end2end/FirstIndexOffsetTests.cpp
@@ -40,10 +40,10 @@
 }
 
 namespace dawn {
-    template <>
-    struct IsDawnBitmask<CheckIndex> {
-        static constexpr bool enable = true;
-    };
+template <>
+struct IsDawnBitmask<CheckIndex> {
+    static constexpr bool enable = true;
+};
 }  // namespace dawn
 
 class FirstIndexOffsetTests : public DawnTest {
diff --git a/src/dawn/tests/end2end/IOSurfaceWrappingTests.cpp b/src/dawn/tests/end2end/IOSurfaceWrappingTests.cpp
index 0fd8c5b..79f862c 100644
--- a/src/dawn/tests/end2end/IOSurfaceWrappingTests.cpp
+++ b/src/dawn/tests/end2end/IOSurfaceWrappingTests.cpp
@@ -24,88 +24,82 @@
 
 namespace {
 
-    void AddIntegerValue(CFMutableDictionaryRef dictionary, const CFStringRef key, int32_t value) {
-        CFNumberRef number = CFNumberCreate(nullptr, kCFNumberSInt32Type, &value);
-        CFDictionaryAddValue(dictionary, key, number);
-        CFRelease(number);
+void AddIntegerValue(CFMutableDictionaryRef dictionary, const CFStringRef key, int32_t value) {
+    CFNumberRef number = CFNumberCreate(nullptr, kCFNumberSInt32Type, &value);
+    CFDictionaryAddValue(dictionary, key, number);
+    CFRelease(number);
+}
+
+class ScopedIOSurfaceRef {
+  public:
+    ScopedIOSurfaceRef() : mSurface(nullptr) {}
+    explicit ScopedIOSurfaceRef(IOSurfaceRef surface) : mSurface(surface) {}
+
+    ~ScopedIOSurfaceRef() {
+        if (mSurface != nullptr) {
+            CFRelease(mSurface);
+            mSurface = nullptr;
+        }
     }
 
-    class ScopedIOSurfaceRef {
-      public:
-        ScopedIOSurfaceRef() : mSurface(nullptr) {
+    IOSurfaceRef get() const { return mSurface; }
+
+    ScopedIOSurfaceRef(ScopedIOSurfaceRef&& other) {
+        if (mSurface != nullptr) {
+            CFRelease(mSurface);
         }
-        explicit ScopedIOSurfaceRef(IOSurfaceRef surface) : mSurface(surface) {
-        }
-
-        ~ScopedIOSurfaceRef() {
-            if (mSurface != nullptr) {
-                CFRelease(mSurface);
-                mSurface = nullptr;
-            }
-        }
-
-        IOSurfaceRef get() const {
-            return mSurface;
-        }
-
-        ScopedIOSurfaceRef(ScopedIOSurfaceRef&& other) {
-            if (mSurface != nullptr) {
-                CFRelease(mSurface);
-            }
-            mSurface = other.mSurface;
-            other.mSurface = nullptr;
-        }
-
-        ScopedIOSurfaceRef& operator=(ScopedIOSurfaceRef&& other) {
-            if (mSurface != nullptr) {
-                CFRelease(mSurface);
-            }
-            mSurface = other.mSurface;
-            other.mSurface = nullptr;
-
-            return *this;
-        }
-
-        ScopedIOSurfaceRef(const ScopedIOSurfaceRef&) = delete;
-        ScopedIOSurfaceRef& operator=(const ScopedIOSurfaceRef&) = delete;
-
-      private:
-        IOSurfaceRef mSurface = nullptr;
-    };
-
-    ScopedIOSurfaceRef CreateSinglePlaneIOSurface(uint32_t width,
-                                                  uint32_t height,
-                                                  uint32_t format,
-                                                  uint32_t bytesPerElement) {
-        CFMutableDictionaryRef dict =
-            CFDictionaryCreateMutable(kCFAllocatorDefault, 0, &kCFTypeDictionaryKeyCallBacks,
-                                      &kCFTypeDictionaryValueCallBacks);
-        AddIntegerValue(dict, kIOSurfaceWidth, width);
-        AddIntegerValue(dict, kIOSurfaceHeight, height);
-        AddIntegerValue(dict, kIOSurfacePixelFormat, format);
-        AddIntegerValue(dict, kIOSurfaceBytesPerElement, bytesPerElement);
-
-        IOSurfaceRef ioSurface = IOSurfaceCreate(dict);
-        EXPECT_NE(nullptr, ioSurface);
-        CFRelease(dict);
-
-        return ScopedIOSurfaceRef(ioSurface);
+        mSurface = other.mSurface;
+        other.mSurface = nullptr;
     }
 
-    class IOSurfaceTestBase : public DawnTest {
-      public:
-        wgpu::Texture WrapIOSurface(const wgpu::TextureDescriptor* descriptor,
-                                    IOSurfaceRef ioSurface,
-                                    bool isInitialized = true) {
-            dawn::native::metal::ExternalImageDescriptorIOSurface externDesc;
-            externDesc.cTextureDescriptor =
-                reinterpret_cast<const WGPUTextureDescriptor*>(descriptor);
-            externDesc.ioSurface = ioSurface;
-            externDesc.isInitialized = isInitialized;
-            WGPUTexture texture = dawn::native::metal::WrapIOSurface(device.Get(), &externDesc);
-            return wgpu::Texture::Acquire(texture);
+    ScopedIOSurfaceRef& operator=(ScopedIOSurfaceRef&& other) {
+        if (mSurface != nullptr) {
+            CFRelease(mSurface);
         }
-    };
+        mSurface = other.mSurface;
+        other.mSurface = nullptr;
+
+        return *this;
+    }
+
+    ScopedIOSurfaceRef(const ScopedIOSurfaceRef&) = delete;
+    ScopedIOSurfaceRef& operator=(const ScopedIOSurfaceRef&) = delete;
+
+  private:
+    IOSurfaceRef mSurface = nullptr;
+};
+
+ScopedIOSurfaceRef CreateSinglePlaneIOSurface(uint32_t width,
+                                              uint32_t height,
+                                              uint32_t format,
+                                              uint32_t bytesPerElement) {
+    CFMutableDictionaryRef dict = CFDictionaryCreateMutable(
+        kCFAllocatorDefault, 0, &kCFTypeDictionaryKeyCallBacks, &kCFTypeDictionaryValueCallBacks);
+    AddIntegerValue(dict, kIOSurfaceWidth, width);
+    AddIntegerValue(dict, kIOSurfaceHeight, height);
+    AddIntegerValue(dict, kIOSurfacePixelFormat, format);
+    AddIntegerValue(dict, kIOSurfaceBytesPerElement, bytesPerElement);
+
+    IOSurfaceRef ioSurface = IOSurfaceCreate(dict);
+    EXPECT_NE(nullptr, ioSurface);
+    CFRelease(dict);
+
+    return ScopedIOSurfaceRef(ioSurface);
+}
+
+class IOSurfaceTestBase : public DawnTest {
+  public:
+    wgpu::Texture WrapIOSurface(const wgpu::TextureDescriptor* descriptor,
+                                IOSurfaceRef ioSurface,
+                                bool isInitialized = true) {
+        dawn::native::metal::ExternalImageDescriptorIOSurface externDesc;
+        externDesc.cTextureDescriptor = reinterpret_cast<const WGPUTextureDescriptor*>(descriptor);
+        externDesc.ioSurface = ioSurface;
+        externDesc.isInitialized = isInitialized;
+        WGPUTexture texture = dawn::native::metal::WrapIOSurface(device.Get(), &externDesc);
+        return wgpu::Texture::Acquire(texture);
+    }
+};
 
 }  // anonymous namespace
 
diff --git a/src/dawn/tests/end2end/MultisampledSamplingTests.cpp b/src/dawn/tests/end2end/MultisampledSamplingTests.cpp
index 8013f6c..9a8ca2a 100644
--- a/src/dawn/tests/end2end/MultisampledSamplingTests.cpp
+++ b/src/dawn/tests/end2end/MultisampledSamplingTests.cpp
@@ -20,18 +20,18 @@
 #include "dawn/utils/WGPUHelpers.h"
 
 namespace {
-    // https://github.com/gpuweb/gpuweb/issues/108
-    // Vulkan, Metal, and D3D11 have the same standard multisample pattern. D3D12 is the same as
-    // D3D11 but it was left out of the documentation.
-    // {0.375, 0.125}, {0.875, 0.375}, {0.125 0.625}, {0.625, 0.875}
-    // In this test, we store them in -1 to 1 space because it makes it
-    // simpler to upload vertex data. Y is flipped because there is a flip between clip space and
-    // rasterization space.
-    static constexpr std::array<std::array<float, 2>, 4> kSamplePositions = {
-        {{0.375 * 2 - 1, 1 - 0.125 * 2},
-         {0.875 * 2 - 1, 1 - 0.375 * 2},
-         {0.125 * 2 - 1, 1 - 0.625 * 2},
-         {0.625 * 2 - 1, 1 - 0.875 * 2}}};
+// https://github.com/gpuweb/gpuweb/issues/108
+// Vulkan, Metal, and D3D11 have the same standard multisample pattern. D3D12 is the same as
+// D3D11 but it was left out of the documentation.
+// {0.375, 0.125}, {0.875, 0.375}, {0.125 0.625}, {0.625, 0.875}
+// In this test, we store them in -1 to 1 space because it makes it
+// simpler to upload vertex data. Y is flipped because there is a flip between clip space and
+// rasterization space.
+static constexpr std::array<std::array<float, 2>, 4> kSamplePositions = {
+    {{0.375 * 2 - 1, 1 - 0.125 * 2},
+     {0.875 * 2 - 1, 1 - 0.375 * 2},
+     {0.125 * 2 - 1, 1 - 0.625 * 2},
+     {0.625 * 2 - 1, 1 - 0.875 * 2}}};
 }  // anonymous namespace
 
 class MultisampledSamplingTest : public DawnTest {
diff --git a/src/dawn/tests/end2end/NonzeroTextureCreationTests.cpp b/src/dawn/tests/end2end/NonzeroTextureCreationTests.cpp
index 2b98552..25ab718 100644
--- a/src/dawn/tests/end2end/NonzeroTextureCreationTests.cpp
+++ b/src/dawn/tests/end2end/NonzeroTextureCreationTests.cpp
@@ -24,245 +24,240 @@
 
 namespace {
 
-    using Format = wgpu::TextureFormat;
-    using Aspect = wgpu::TextureAspect;
-    using Usage = wgpu::TextureUsage;
-    using Dimension = wgpu::TextureDimension;
-    using DepthOrArrayLayers = uint32_t;
-    using MipCount = uint32_t;
-    using Mip = uint32_t;
-    using SampleCount = uint32_t;
+using Format = wgpu::TextureFormat;
+using Aspect = wgpu::TextureAspect;
+using Usage = wgpu::TextureUsage;
+using Dimension = wgpu::TextureDimension;
+using DepthOrArrayLayers = uint32_t;
+using MipCount = uint32_t;
+using Mip = uint32_t;
+using SampleCount = uint32_t;
 
-    DAWN_TEST_PARAM_STRUCT(Params,
-                           Format,
-                           Aspect,
-                           Usage,
-                           Dimension,
-                           DepthOrArrayLayers,
-                           MipCount,
-                           Mip,
-                           SampleCount);
+DAWN_TEST_PARAM_STRUCT(Params,
+                       Format,
+                       Aspect,
+                       Usage,
+                       Dimension,
+                       DepthOrArrayLayers,
+                       MipCount,
+                       Mip,
+                       SampleCount);
 
-    template <typename T>
-    class ExpectNonZero : public detail::CustomTextureExpectation {
-      public:
-        uint32_t DataSize() override {
-            return sizeof(T);
+template <typename T>
+class ExpectNonZero : public detail::CustomTextureExpectation {
+  public:
+    uint32_t DataSize() override { return sizeof(T); }
+
+    testing::AssertionResult Check(const void* data, size_t size) override {
+        ASSERT(size % DataSize() == 0 && size > 0);
+        const T* actual = static_cast<const T*>(data);
+        T value = *actual;
+        if (value == T(0)) {
+            return testing::AssertionFailure()
+                   << "Expected data to be non-zero, was " << value << std::endl;
         }
-
-        testing::AssertionResult Check(const void* data, size_t size) override {
-            ASSERT(size % DataSize() == 0 && size > 0);
-            const T* actual = static_cast<const T*>(data);
-            T value = *actual;
-            if (value == T(0)) {
+        for (size_t i = 0; i < size / DataSize(); ++i) {
+            if (actual[i] != value) {
                 return testing::AssertionFailure()
-                       << "Expected data to be non-zero, was " << value << std::endl;
+                       << "Expected data[" << i << "] to match non-zero value " << value
+                       << ", actual " << actual[i] << std::endl;
             }
-            for (size_t i = 0; i < size / DataSize(); ++i) {
-                if (actual[i] != value) {
-                    return testing::AssertionFailure()
-                           << "Expected data[" << i << "] to match non-zero value " << value
-                           << ", actual " << actual[i] << std::endl;
-                }
-            }
-
-            return testing::AssertionSuccess();
         }
-    };
+
+        return testing::AssertionSuccess();
+    }
+};
 
 #define EXPECT_TEXTURE_NONZERO(T, ...) \
     AddTextureExpectation(__FILE__, __LINE__, new ExpectNonZero<T>(), __VA_ARGS__)
 
-    class NonzeroTextureCreationTests : public DawnTestWithParams<Params> {
-      protected:
-        constexpr static uint32_t kSize = 128;
+class NonzeroTextureCreationTests : public DawnTestWithParams<Params> {
+  protected:
+    constexpr static uint32_t kSize = 128;
 
-        std::vector<wgpu::FeatureName> GetRequiredFeatures() override {
-            if (GetParam().mFormat == wgpu::TextureFormat::BC1RGBAUnorm &&
-                SupportsFeatures({wgpu::FeatureName::TextureCompressionBC})) {
-                return {wgpu::FeatureName::TextureCompressionBC};
-            }
-            return {};
+    std::vector<wgpu::FeatureName> GetRequiredFeatures() override {
+        if (GetParam().mFormat == wgpu::TextureFormat::BC1RGBAUnorm &&
+            SupportsFeatures({wgpu::FeatureName::TextureCompressionBC})) {
+            return {wgpu::FeatureName::TextureCompressionBC};
         }
+        return {};
+    }
 
-        void Run() {
-            DAWN_TEST_UNSUPPORTED_IF(GetParam().mFormat == wgpu::TextureFormat::BC1RGBAUnorm &&
-                                     !SupportsFeatures({wgpu::FeatureName::TextureCompressionBC}));
+    void Run() {
+        DAWN_TEST_UNSUPPORTED_IF(GetParam().mFormat == wgpu::TextureFormat::BC1RGBAUnorm &&
+                                 !SupportsFeatures({wgpu::FeatureName::TextureCompressionBC}));
 
-            // TODO(crbug.com/dawn/667): Work around the fact that some platforms do not support
-            // reading from Snorm textures.
-            DAWN_TEST_UNSUPPORTED_IF(GetParam().mFormat == wgpu::TextureFormat::RGBA8Snorm &&
-                                     HasToggleEnabled("disable_snorm_read"));
+        // TODO(crbug.com/dawn/667): Work around the fact that some platforms do not support
+        // reading from Snorm textures.
+        DAWN_TEST_UNSUPPORTED_IF(GetParam().mFormat == wgpu::TextureFormat::RGBA8Snorm &&
+                                 HasToggleEnabled("disable_snorm_read"));
 
-            // TODO(crbug.com/dawn/791): Determine Intel specific platforms this occurs on, and
-            // implement a workaround on all backends (happens on Windows too, but not on our test
-            // machines).
-            DAWN_SUPPRESS_TEST_IF(
-                (GetParam().mFormat == wgpu::TextureFormat::Depth32Float ||
-                 GetParam().mFormat == wgpu::TextureFormat::Depth24PlusStencil8) &&
-                IsMetal() && IsIntel() && GetParam().mMip != 0);
+        // TODO(crbug.com/dawn/791): Determine Intel specific platforms this occurs on, and
+        // implement a workaround on all backends (happens on Windows too, but not on our test
+        // machines).
+        DAWN_SUPPRESS_TEST_IF((GetParam().mFormat == wgpu::TextureFormat::Depth32Float ||
+                               GetParam().mFormat == wgpu::TextureFormat::Depth24PlusStencil8) &&
+                              IsMetal() && IsIntel() && GetParam().mMip != 0);
 
-            // TODO(crbug.com/dawn/1071): Implement a workaround on Intel/Metal backends.
-            DAWN_SUPPRESS_TEST_IF((GetParam().mFormat == wgpu::TextureFormat::R8Unorm ||
-                                   GetParam().mFormat == wgpu::TextureFormat::RG8Unorm) &&
-                                  GetParam().mMipCount > 1 &&
-                                  HasToggleEnabled("disable_r8_rg8_mipmaps"));
+        // TODO(crbug.com/dawn/1071): Implement a workaround on Intel/Metal backends.
+        DAWN_SUPPRESS_TEST_IF((GetParam().mFormat == wgpu::TextureFormat::R8Unorm ||
+                               GetParam().mFormat == wgpu::TextureFormat::RG8Unorm) &&
+                              GetParam().mMipCount > 1 &&
+                              HasToggleEnabled("disable_r8_rg8_mipmaps"));
 
-            // TODO(crbug.com/dawn/667): ANGLE claims to support NV_read_stencil, but won't read
-            // correctly from a DEPTH32F_STENCIL8 texture.
-            DAWN_SUPPRESS_TEST_IF(GetParam().mFormat == wgpu::TextureFormat::Depth24PlusStencil8 &&
-                                  GetParam().mAspect == wgpu::TextureAspect::StencilOnly &&
-                                  IsANGLE());
+        // TODO(crbug.com/dawn/667): ANGLE claims to support NV_read_stencil, but won't read
+        // correctly from a DEPTH32F_STENCIL8 texture.
+        DAWN_SUPPRESS_TEST_IF(GetParam().mFormat == wgpu::TextureFormat::Depth24PlusStencil8 &&
+                              GetParam().mAspect == wgpu::TextureAspect::StencilOnly && IsANGLE());
 
-            // TODO(crbug.com/dawn/667): Work around the fact that some platforms do not support
-            // reading depth.
-            DAWN_TEST_UNSUPPORTED_IF(GetParam().mAspect == wgpu::TextureAspect::DepthOnly &&
-                                     HasToggleEnabled("disable_depth_read"));
+        // TODO(crbug.com/dawn/667): Work around the fact that some platforms do not support
+        // reading depth.
+        DAWN_TEST_UNSUPPORTED_IF(GetParam().mAspect == wgpu::TextureAspect::DepthOnly &&
+                                 HasToggleEnabled("disable_depth_read"));
 
-            // TODO(crbug.com/dawn/667): Work around the fact that some platforms do not support
-            // reading stencil.
-            DAWN_TEST_UNSUPPORTED_IF(GetParam().mAspect == wgpu::TextureAspect::StencilOnly &&
-                                     HasToggleEnabled("disable_stencil_read"));
+        // TODO(crbug.com/dawn/667): Work around the fact that some platforms do not support
+        // reading stencil.
+        DAWN_TEST_UNSUPPORTED_IF(GetParam().mAspect == wgpu::TextureAspect::StencilOnly &&
+                                 HasToggleEnabled("disable_stencil_read"));
 
-            // GL may support the feature, but reading data back is not implemented.
-            DAWN_TEST_UNSUPPORTED_IF(GetParam().mFormat == wgpu::TextureFormat::BC1RGBAUnorm &&
-                                     (IsOpenGL() || IsOpenGLES()));
+        // GL may support the feature, but reading data back is not implemented.
+        DAWN_TEST_UNSUPPORTED_IF(GetParam().mFormat == wgpu::TextureFormat::BC1RGBAUnorm &&
+                                 (IsOpenGL() || IsOpenGLES()));
 
-            wgpu::TextureDescriptor descriptor;
-            descriptor.dimension = GetParam().mDimension;
-            descriptor.size.width = kSize;
-            descriptor.size.height = kSize;
-            descriptor.size.depthOrArrayLayers = GetParam().mDepthOrArrayLayers;
-            descriptor.sampleCount = GetParam().mSampleCount;
-            descriptor.format = GetParam().mFormat;
-            descriptor.usage = GetParam().mUsage;
-            descriptor.mipLevelCount = GetParam().mMipCount;
+        wgpu::TextureDescriptor descriptor;
+        descriptor.dimension = GetParam().mDimension;
+        descriptor.size.width = kSize;
+        descriptor.size.height = kSize;
+        descriptor.size.depthOrArrayLayers = GetParam().mDepthOrArrayLayers;
+        descriptor.sampleCount = GetParam().mSampleCount;
+        descriptor.format = GetParam().mFormat;
+        descriptor.usage = GetParam().mUsage;
+        descriptor.mipLevelCount = GetParam().mMipCount;
 
-            wgpu::Texture texture = device.CreateTexture(&descriptor);
+        wgpu::Texture texture = device.CreateTexture(&descriptor);
 
-            uint32_t mip = GetParam().mMip;
-            uint32_t mipSize = std::max(kSize >> mip, 1u);
-            uint32_t depthOrArrayLayers = GetParam().mDimension == wgpu::TextureDimension::e3D
-                                              ? std::max(GetParam().mDepthOrArrayLayers >> mip, 1u)
-                                              : GetParam().mDepthOrArrayLayers;
-            switch (GetParam().mFormat) {
-                case wgpu::TextureFormat::R8Unorm: {
-                    if (GetParam().mSampleCount > 1) {
-                        ExpectMultisampledFloatData(texture, mipSize, mipSize, 1,
-                                                    GetParam().mSampleCount, 0, mip,
-                                                    new ExpectNonZero<float>());
-                    } else {
-                        EXPECT_TEXTURE_EQ(new ExpectNonZero<uint8_t>(), texture, {0, 0, 0},
-                                          {mipSize, mipSize, depthOrArrayLayers}, mip);
-                    }
-                    break;
-                }
-                case wgpu::TextureFormat::RG8Unorm: {
-                    if (GetParam().mSampleCount > 1) {
-                        ExpectMultisampledFloatData(texture, mipSize, mipSize, 2,
-                                                    GetParam().mSampleCount, 0, mip,
-                                                    new ExpectNonZero<float>());
-                    } else {
-                        EXPECT_TEXTURE_EQ(new ExpectNonZero<uint16_t>(), texture, {0, 0, 0},
-                                          {mipSize, mipSize, depthOrArrayLayers}, mip);
-                    }
-                    break;
-                }
-                case wgpu::TextureFormat::RGBA8Unorm:
-                case wgpu::TextureFormat::RGBA8Snorm: {
-                    if (GetParam().mSampleCount > 1) {
-                        ExpectMultisampledFloatData(texture, mipSize, mipSize, 4,
-                                                    GetParam().mSampleCount, 0, mip,
-                                                    new ExpectNonZero<float>());
-                    } else {
-                        EXPECT_TEXTURE_EQ(new ExpectNonZero<uint32_t>(), texture, {0, 0, 0},
-                                          {mipSize, mipSize, depthOrArrayLayers}, mip);
-                    }
-                    break;
-                }
-                case wgpu::TextureFormat::Depth32Float: {
-                    EXPECT_TEXTURE_EQ(new ExpectNonZero<float>(), texture, {0, 0, 0},
+        uint32_t mip = GetParam().mMip;
+        uint32_t mipSize = std::max(kSize >> mip, 1u);
+        uint32_t depthOrArrayLayers = GetParam().mDimension == wgpu::TextureDimension::e3D
+                                          ? std::max(GetParam().mDepthOrArrayLayers >> mip, 1u)
+                                          : GetParam().mDepthOrArrayLayers;
+        switch (GetParam().mFormat) {
+            case wgpu::TextureFormat::R8Unorm: {
+                if (GetParam().mSampleCount > 1) {
+                    ExpectMultisampledFloatData(texture, mipSize, mipSize, 1,
+                                                GetParam().mSampleCount, 0, mip,
+                                                new ExpectNonZero<float>());
+                } else {
+                    EXPECT_TEXTURE_EQ(new ExpectNonZero<uint8_t>(), texture, {0, 0, 0},
                                       {mipSize, mipSize, depthOrArrayLayers}, mip);
-                    break;
                 }
-                case wgpu::TextureFormat::Depth24PlusStencil8: {
-                    switch (GetParam().mAspect) {
-                        case wgpu::TextureAspect::DepthOnly: {
-                            for (uint32_t arrayLayer = 0;
-                                 arrayLayer < GetParam().mDepthOrArrayLayers; ++arrayLayer) {
-                                ExpectSampledDepthData(texture, mipSize, mipSize, arrayLayer, mip,
-                                                       new ExpectNonZero<float>())
-                                    << "arrayLayer " << arrayLayer;
-                            }
-                            break;
-                        }
-                        case wgpu::TextureAspect::StencilOnly: {
-                            uint32_t texelCount = mipSize * mipSize * depthOrArrayLayers;
-                            std::vector<uint8_t> expectedStencil(texelCount, 1);
-                            EXPECT_TEXTURE_EQ(expectedStencil.data(), texture, {0, 0, 0},
-                                              {mipSize, mipSize, depthOrArrayLayers}, mip,
-                                              wgpu::TextureAspect::StencilOnly);
-
-                            break;
-                        }
-                        default:
-                            UNREACHABLE();
-                    }
-                    break;
-                }
-                case wgpu::TextureFormat::BC1RGBAUnorm: {
-                    // Set buffer with dirty data so we know it is cleared by the lazy cleared
-                    // texture copy
-                    uint32_t blockWidth = utils::GetTextureFormatBlockWidth(GetParam().mFormat);
-                    uint32_t blockHeight = utils::GetTextureFormatBlockHeight(GetParam().mFormat);
-                    wgpu::Extent3D copySize = {Align(mipSize, blockWidth),
-                                               Align(mipSize, blockHeight), depthOrArrayLayers};
-
-                    uint32_t bytesPerRow =
-                        utils::GetMinimumBytesPerRow(GetParam().mFormat, copySize.width);
-                    uint32_t rowsPerImage = copySize.height / blockHeight;
-
-                    uint64_t bufferSize = utils::RequiredBytesInCopy(bytesPerRow, rowsPerImage,
-                                                                     copySize, GetParam().mFormat);
-
-                    std::vector<uint8_t> data(bufferSize, 100);
-                    wgpu::Buffer bufferDst = utils::CreateBufferFromData(
-                        device, data.data(), bufferSize, wgpu::BufferUsage::CopySrc);
-
-                    wgpu::ImageCopyBuffer imageCopyBuffer =
-                        utils::CreateImageCopyBuffer(bufferDst, 0, bytesPerRow, rowsPerImage);
-                    wgpu::ImageCopyTexture imageCopyTexture =
-                        utils::CreateImageCopyTexture(texture, mip, {0, 0, 0});
-
-                    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-                    encoder.CopyTextureToBuffer(&imageCopyTexture, &imageCopyBuffer, &copySize);
-                    wgpu::CommandBuffer commands = encoder.Finish();
-                    queue.Submit(1, &commands);
-
-                    uint32_t copiedWidthInBytes =
-                        utils::GetTexelBlockSizeInBytes(GetParam().mFormat) * copySize.width /
-                        blockWidth;
-                    uint8_t* d = data.data();
-                    for (uint32_t z = 0; z < depthOrArrayLayers; ++z) {
-                        for (uint32_t row = 0; row < copySize.height / blockHeight; ++row) {
-                            std::fill_n(d, copiedWidthInBytes, 1);
-                            d += bytesPerRow;
-                        }
-                    }
-                    EXPECT_BUFFER_U8_RANGE_EQ(data.data(), bufferDst, 0, bufferSize);
-                    break;
-                }
-                default:
-                    UNREACHABLE();
+                break;
             }
-        }
-    };
+            case wgpu::TextureFormat::RG8Unorm: {
+                if (GetParam().mSampleCount > 1) {
+                    ExpectMultisampledFloatData(texture, mipSize, mipSize, 2,
+                                                GetParam().mSampleCount, 0, mip,
+                                                new ExpectNonZero<float>());
+                } else {
+                    EXPECT_TEXTURE_EQ(new ExpectNonZero<uint16_t>(), texture, {0, 0, 0},
+                                      {mipSize, mipSize, depthOrArrayLayers}, mip);
+                }
+                break;
+            }
+            case wgpu::TextureFormat::RGBA8Unorm:
+            case wgpu::TextureFormat::RGBA8Snorm: {
+                if (GetParam().mSampleCount > 1) {
+                    ExpectMultisampledFloatData(texture, mipSize, mipSize, 4,
+                                                GetParam().mSampleCount, 0, mip,
+                                                new ExpectNonZero<float>());
+                } else {
+                    EXPECT_TEXTURE_EQ(new ExpectNonZero<uint32_t>(), texture, {0, 0, 0},
+                                      {mipSize, mipSize, depthOrArrayLayers}, mip);
+                }
+                break;
+            }
+            case wgpu::TextureFormat::Depth32Float: {
+                EXPECT_TEXTURE_EQ(new ExpectNonZero<float>(), texture, {0, 0, 0},
+                                  {mipSize, mipSize, depthOrArrayLayers}, mip);
+                break;
+            }
+            case wgpu::TextureFormat::Depth24PlusStencil8: {
+                switch (GetParam().mAspect) {
+                    case wgpu::TextureAspect::DepthOnly: {
+                        for (uint32_t arrayLayer = 0; arrayLayer < GetParam().mDepthOrArrayLayers;
+                             ++arrayLayer) {
+                            ExpectSampledDepthData(texture, mipSize, mipSize, arrayLayer, mip,
+                                                   new ExpectNonZero<float>())
+                                << "arrayLayer " << arrayLayer;
+                        }
+                        break;
+                    }
+                    case wgpu::TextureAspect::StencilOnly: {
+                        uint32_t texelCount = mipSize * mipSize * depthOrArrayLayers;
+                        std::vector<uint8_t> expectedStencil(texelCount, 1);
+                        EXPECT_TEXTURE_EQ(expectedStencil.data(), texture, {0, 0, 0},
+                                          {mipSize, mipSize, depthOrArrayLayers}, mip,
+                                          wgpu::TextureAspect::StencilOnly);
 
-    class NonzeroNonrenderableTextureCreationTests : public NonzeroTextureCreationTests {};
-    class NonzeroCompressedTextureCreationTests : public NonzeroTextureCreationTests {};
-    class NonzeroDepthTextureCreationTests : public NonzeroTextureCreationTests {};
-    class NonzeroDepthStencilTextureCreationTests : public NonzeroTextureCreationTests {};
-    class NonzeroMultisampledTextureCreationTests : public NonzeroTextureCreationTests {};
+                        break;
+                    }
+                    default:
+                        UNREACHABLE();
+                }
+                break;
+            }
+            case wgpu::TextureFormat::BC1RGBAUnorm: {
+                // Set buffer with dirty data so we know it is cleared by the lazy cleared
+                // texture copy
+                uint32_t blockWidth = utils::GetTextureFormatBlockWidth(GetParam().mFormat);
+                uint32_t blockHeight = utils::GetTextureFormatBlockHeight(GetParam().mFormat);
+                wgpu::Extent3D copySize = {Align(mipSize, blockWidth), Align(mipSize, blockHeight),
+                                           depthOrArrayLayers};
+
+                uint32_t bytesPerRow =
+                    utils::GetMinimumBytesPerRow(GetParam().mFormat, copySize.width);
+                uint32_t rowsPerImage = copySize.height / blockHeight;
+
+                uint64_t bufferSize = utils::RequiredBytesInCopy(bytesPerRow, rowsPerImage,
+                                                                 copySize, GetParam().mFormat);
+
+                std::vector<uint8_t> data(bufferSize, 100);
+                wgpu::Buffer bufferDst = utils::CreateBufferFromData(
+                    device, data.data(), bufferSize, wgpu::BufferUsage::CopySrc);
+
+                wgpu::ImageCopyBuffer imageCopyBuffer =
+                    utils::CreateImageCopyBuffer(bufferDst, 0, bytesPerRow, rowsPerImage);
+                wgpu::ImageCopyTexture imageCopyTexture =
+                    utils::CreateImageCopyTexture(texture, mip, {0, 0, 0});
+
+                wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+                encoder.CopyTextureToBuffer(&imageCopyTexture, &imageCopyBuffer, &copySize);
+                wgpu::CommandBuffer commands = encoder.Finish();
+                queue.Submit(1, &commands);
+
+                uint32_t copiedWidthInBytes = utils::GetTexelBlockSizeInBytes(GetParam().mFormat) *
+                                              copySize.width / blockWidth;
+                uint8_t* d = data.data();
+                for (uint32_t z = 0; z < depthOrArrayLayers; ++z) {
+                    for (uint32_t row = 0; row < copySize.height / blockHeight; ++row) {
+                        std::fill_n(d, copiedWidthInBytes, 1);
+                        d += bytesPerRow;
+                    }
+                }
+                EXPECT_BUFFER_U8_RANGE_EQ(data.data(), bufferDst, 0, bufferSize);
+                break;
+            }
+            default:
+                UNREACHABLE();
+        }
+    }
+};
+
+class NonzeroNonrenderableTextureCreationTests : public NonzeroTextureCreationTests {};
+class NonzeroCompressedTextureCreationTests : public NonzeroTextureCreationTests {};
+class NonzeroDepthTextureCreationTests : public NonzeroTextureCreationTests {};
+class NonzeroDepthStencilTextureCreationTests : public NonzeroTextureCreationTests {};
+class NonzeroMultisampledTextureCreationTests : public NonzeroTextureCreationTests {};
 
 }  // anonymous namespace
 
diff --git a/src/dawn/tests/end2end/QueryTests.cpp b/src/dawn/tests/end2end/QueryTests.cpp
index 28f9761..3fa4860 100644
--- a/src/dawn/tests/end2end/QueryTests.cpp
+++ b/src/dawn/tests/end2end/QueryTests.cpp
@@ -41,9 +41,7 @@
 
     ~OcclusionExpectation() override = default;
 
-    explicit OcclusionExpectation(Result expected) {
-        mExpected = expected;
-    }
+    explicit OcclusionExpectation(Result expected) { mExpected = expected; }
 
     testing::AssertionResult Check(const void* data, size_t size) override {
         ASSERT(size % sizeof(uint64_t) == 0);
diff --git a/src/dawn/tests/end2end/ReadOnlyDepthStencilAttachmentTests.cpp b/src/dawn/tests/end2end/ReadOnlyDepthStencilAttachmentTests.cpp
index a3a0979b..ad5c697 100644
--- a/src/dawn/tests/end2end/ReadOnlyDepthStencilAttachmentTests.cpp
+++ b/src/dawn/tests/end2end/ReadOnlyDepthStencilAttachmentTests.cpp
@@ -22,8 +22,8 @@
 constexpr static uint32_t kSize = 4;
 
 namespace {
-    using TextureFormat = wgpu::TextureFormat;
-    DAWN_TEST_PARAM_STRUCT(ReadOnlyDepthStencilAttachmentTestsParams, TextureFormat);
+using TextureFormat = wgpu::TextureFormat;
+DAWN_TEST_PARAM_STRUCT(ReadOnlyDepthStencilAttachmentTestsParams, TextureFormat);
 }  // namespace
 
 class ReadOnlyDepthStencilAttachmentTests
@@ -57,9 +57,7 @@
         }
     }
 
-    bool IsFormatSupported() const {
-        return mIsFormatSupported;
-    }
+    bool IsFormatSupported() const { return mIsFormatSupported; }
 
     wgpu::RenderPipeline CreateRenderPipeline(wgpu::TextureAspect aspect,
                                               wgpu::TextureFormat format,
diff --git a/src/dawn/tests/end2end/RenderPassLoadOpTests.cpp b/src/dawn/tests/end2end/RenderPassLoadOpTests.cpp
index f61e7d8..6fb5ddf 100644
--- a/src/dawn/tests/end2end/RenderPassLoadOpTests.cpp
+++ b/src/dawn/tests/end2end/RenderPassLoadOpTests.cpp
@@ -23,8 +23,7 @@
 
 class DrawQuad {
   public:
-    DrawQuad() {
-    }
+    DrawQuad() {}
     DrawQuad(wgpu::Device device, const char* vsSource, const char* fsSource) : device(device) {
         vsModule = utils::CreateShaderModule(device, vsSource);
         fsModule = utils::CreateShaderModule(device, fsSource);
diff --git a/src/dawn/tests/end2end/RequiredBufferSizeInCopyTests.cpp b/src/dawn/tests/end2end/RequiredBufferSizeInCopyTests.cpp
index ef0608c..58c45ce 100644
--- a/src/dawn/tests/end2end/RequiredBufferSizeInCopyTests.cpp
+++ b/src/dawn/tests/end2end/RequiredBufferSizeInCopyTests.cpp
@@ -26,28 +26,28 @@
 constexpr static uint32_t kBytesPerBlock = 4;
 
 namespace {
-    enum class Type { B2TCopy, T2BCopy };
+enum class Type { B2TCopy, T2BCopy };
 
-    std::ostream& operator<<(std::ostream& o, Type copyType) {
-        switch (copyType) {
-            case Type::B2TCopy:
-                o << "B2TCopy";
-                break;
-            case Type::T2BCopy:
-                o << "T2BCopy";
-                break;
-        }
-        return o;
+std::ostream& operator<<(std::ostream& o, Type copyType) {
+    switch (copyType) {
+        case Type::B2TCopy:
+            o << "B2TCopy";
+            break;
+        case Type::T2BCopy:
+            o << "T2BCopy";
+            break;
     }
+    return o;
+}
 
-    using TextureDimension = wgpu::TextureDimension;
-    using CopyDepth = uint32_t;
-    using ExtraRowsPerImage = uint64_t;
-    DAWN_TEST_PARAM_STRUCT(RequiredBufferSizeInCopyTestsParams,
-                           Type,
-                           TextureDimension,
-                           CopyDepth,
-                           ExtraRowsPerImage);
+using TextureDimension = wgpu::TextureDimension;
+using CopyDepth = uint32_t;
+using ExtraRowsPerImage = uint64_t;
+DAWN_TEST_PARAM_STRUCT(RequiredBufferSizeInCopyTestsParams,
+                       Type,
+                       TextureDimension,
+                       CopyDepth,
+                       ExtraRowsPerImage);
 }  // namespace
 
 // Tests in this file are used to expose an error on D3D12 about required minimum buffer size.
diff --git a/src/dawn/tests/end2end/SamplerFilterAnisotropicTests.cpp b/src/dawn/tests/end2end/SamplerFilterAnisotropicTests.cpp
index fdec46e..1e5a91e 100644
--- a/src/dawn/tests/end2end/SamplerFilterAnisotropicTests.cpp
+++ b/src/dawn/tests/end2end/SamplerFilterAnisotropicTests.cpp
@@ -15,20 +15,20 @@
 #include <cmath>
 #include <vector>
 
-#include "dawn/tests/DawnTest.h"
 #include "dawn/common/Assert.h"
 #include "dawn/common/Constants.h"
+#include "dawn/tests/DawnTest.h"
 #include "dawn/utils/ComboRenderPipelineDescriptor.h"
 #include "dawn/utils/WGPUHelpers.h"
 
 constexpr static unsigned int kRTSize = 16;
 
 namespace {
-    // MipLevel colors, ordering from base level to high level
-    // each mipmap of the texture is having a different color
-    // so we can check if the sampler anisotropic filtering is fetching
-    // from the correct miplevel
-    const std::array<RGBA8, 3> colors = {RGBA8::kRed, RGBA8::kGreen, RGBA8::kBlue};
+// MipLevel colors, ordering from base level to high level
+// each mipmap of the texture is having a different color
+// so we can check if the sampler anisotropic filtering is fetching
+// from the correct miplevel
+const std::array<RGBA8, 3> colors = {RGBA8::kRed, RGBA8::kGreen, RGBA8::kBlue};
 }  // namespace
 
 class SamplerFilterAnisotropicTest : public DawnTest {
diff --git a/src/dawn/tests/end2end/SamplerTests.cpp b/src/dawn/tests/end2end/SamplerTests.cpp
index c2c9830..3ad38ec 100644
--- a/src/dawn/tests/end2end/SamplerTests.cpp
+++ b/src/dawn/tests/end2end/SamplerTests.cpp
@@ -25,28 +25,28 @@
 constexpr static unsigned int kRTSize = 64;
 
 namespace {
-    struct AddressModeTestCase {
-        wgpu::AddressMode mMode;
-        uint8_t mExpected2;
-        uint8_t mExpected3;
-    };
-    AddressModeTestCase addressModes[] = {
-        {
-            wgpu::AddressMode::Repeat,
-            0,
-            255,
-        },
-        {
-            wgpu::AddressMode::MirrorRepeat,
-            255,
-            0,
-        },
-        {
-            wgpu::AddressMode::ClampToEdge,
-            255,
-            255,
-        },
-    };
+struct AddressModeTestCase {
+    wgpu::AddressMode mMode;
+    uint8_t mExpected2;
+    uint8_t mExpected3;
+};
+AddressModeTestCase addressModes[] = {
+    {
+        wgpu::AddressMode::Repeat,
+        0,
+        255,
+    },
+    {
+        wgpu::AddressMode::MirrorRepeat,
+        255,
+        0,
+    },
+    {
+        wgpu::AddressMode::ClampToEdge,
+        255,
+        255,
+    },
+};
 }  // namespace
 
 class SamplerTest : public DawnTest {
diff --git a/src/dawn/tests/end2end/ShaderFloat16Tests.cpp b/src/dawn/tests/end2end/ShaderFloat16Tests.cpp
index b1af453..81c7ed6 100644
--- a/src/dawn/tests/end2end/ShaderFloat16Tests.cpp
+++ b/src/dawn/tests/end2end/ShaderFloat16Tests.cpp
@@ -29,9 +29,7 @@
         return {wgpu::FeatureName::DawnShaderFloat16};
     }
 
-    bool IsShaderFloat16Supported() const {
-        return mIsShaderFloat16Supported;
-    }
+    bool IsShaderFloat16Supported() const { return mIsShaderFloat16Supported; }
 
     bool mIsShaderFloat16Supported = false;
 };
diff --git a/src/dawn/tests/end2end/StorageTextureTests.cpp b/src/dawn/tests/end2end/StorageTextureTests.cpp
index 5cd0411..058e108 100644
--- a/src/dawn/tests/end2end/StorageTextureTests.cpp
+++ b/src/dawn/tests/end2end/StorageTextureTests.cpp
@@ -25,11 +25,11 @@
 #include "dawn/utils/WGPUHelpers.h"
 
 namespace {
-    bool OpenGLESSupportsStorageTexture(wgpu::TextureFormat format) {
-        // TODO(crbug.com/dawn/595): 32-bit RG* formats are unsupported on OpenGL ES.
-        return format != wgpu::TextureFormat::RG32Float &&
-               format != wgpu::TextureFormat::RG32Sint && format != wgpu::TextureFormat::RG32Uint;
-    }
+bool OpenGLESSupportsStorageTexture(wgpu::TextureFormat format) {
+    // TODO(crbug.com/dawn/595): 32-bit RG* formats are unsupported on OpenGL ES.
+    return format != wgpu::TextureFormat::RG32Float && format != wgpu::TextureFormat::RG32Sint &&
+           format != wgpu::TextureFormat::RG32Uint;
+}
 }  // namespace
 
 class StorageTextureTests : public DawnTest {
diff --git a/src/dawn/tests/end2end/SwapChainValidationTests.cpp b/src/dawn/tests/end2end/SwapChainValidationTests.cpp
index 0da7210..534e95c 100644
--- a/src/dawn/tests/end2end/SwapChainValidationTests.cpp
+++ b/src/dawn/tests/end2end/SwapChainValidationTests.cpp
@@ -70,9 +70,7 @@
     wgpu::SwapChainDescriptor badDescriptor;
 
     // Checks that a RenderAttachment view is an error by trying to create a render pass on it.
-    void CheckTextureViewIsError(wgpu::TextureView view) {
-        CheckTextureView(view, true, false);
-    }
+    void CheckTextureViewIsError(wgpu::TextureView view) { CheckTextureView(view, true, false); }
 
     // Checks that a RenderAttachment view is an error by trying to submit a render pass on it.
     void CheckTextureViewIsDestroyed(wgpu::TextureView view) {
@@ -80,9 +78,7 @@
     }
 
     // Checks that a RenderAttachment view is valid by submitting a render pass on it.
-    void CheckTextureViewIsValid(wgpu::TextureView view) {
-        CheckTextureView(view, false, false);
-    }
+    void CheckTextureViewIsValid(wgpu::TextureView view) { CheckTextureView(view, false, false); }
 
   private:
     void CheckTextureView(wgpu::TextureView view, bool errorAtFinish, bool errorAtSubmit) {
diff --git a/src/dawn/tests/end2end/TextureFormatTests.cpp b/src/dawn/tests/end2end/TextureFormatTests.cpp
index ca875bc..0c7de09 100644
--- a/src/dawn/tests/end2end/TextureFormatTests.cpp
+++ b/src/dawn/tests/end2end/TextureFormatTests.cpp
@@ -18,9 +18,9 @@
 #include <utility>
 #include <vector>
 
-#include "dawn/tests/DawnTest.h"
 #include "dawn/common/Assert.h"
 #include "dawn/common/Math.h"
+#include "dawn/tests/DawnTest.h"
 #include "dawn/utils/ComboRenderPipelineDescriptor.h"
 #include "dawn/utils/TextureUtils.h"
 #include "dawn/utils/WGPUHelpers.h"
@@ -30,8 +30,7 @@
 class ExpectFloatWithTolerance : public detail::Expectation {
   public:
     ExpectFloatWithTolerance(std::vector<float> expected, float tolerance)
-        : mExpected(std::move(expected)), mTolerance(tolerance) {
-    }
+        : mExpected(std::move(expected)), mTolerance(tolerance) {}
 
     testing::AssertionResult Check(const void* data, size_t size) override {
         ASSERT(size == sizeof(float) * mExpected.size());
@@ -78,8 +77,7 @@
 // An expectation for float16 buffers that can correctly compare NaNs (all NaNs are equivalent).
 class ExpectFloat16 : public detail::Expectation {
   public:
-    explicit ExpectFloat16(std::vector<uint16_t> expected) : mExpected(std::move(expected)) {
-    }
+    explicit ExpectFloat16(std::vector<uint16_t> expected) : mExpected(std::move(expected)) {}
 
     testing::AssertionResult Check(const void* data, size_t size) override {
         ASSERT(size == sizeof(uint16_t) * mExpected.size());
diff --git a/src/dawn/tests/end2end/TextureViewTests.cpp b/src/dawn/tests/end2end/TextureViewTests.cpp
index 6c1fed9..2ad6cd0 100644
--- a/src/dawn/tests/end2end/TextureViewTests.cpp
+++ b/src/dawn/tests/end2end/TextureViewTests.cpp
@@ -17,10 +17,10 @@
 #include <string>
 #include <vector>
 
-#include "dawn/tests/DawnTest.h"
 #include "dawn/common/Assert.h"
 #include "dawn/common/Constants.h"
 #include "dawn/common/Math.h"
+#include "dawn/tests/DawnTest.h"
 #include "dawn/utils/ComboRenderPipelineDescriptor.h"
 #include "dawn/utils/WGPUHelpers.h"
 
@@ -29,40 +29,40 @@
 constexpr uint32_t kBytesPerTexel = 4;
 
 namespace {
-    wgpu::Texture Create2DTexture(wgpu::Device device,
-                                  uint32_t width,
-                                  uint32_t height,
-                                  uint32_t arrayLayerCount,
-                                  uint32_t mipLevelCount,
-                                  wgpu::TextureUsage usage) {
-        wgpu::TextureDescriptor descriptor;
-        descriptor.dimension = wgpu::TextureDimension::e2D;
-        descriptor.size.width = width;
-        descriptor.size.height = height;
-        descriptor.size.depthOrArrayLayers = arrayLayerCount;
-        descriptor.sampleCount = 1;
-        descriptor.format = kDefaultFormat;
-        descriptor.mipLevelCount = mipLevelCount;
-        descriptor.usage = usage;
-        return device.CreateTexture(&descriptor);
-    }
+wgpu::Texture Create2DTexture(wgpu::Device device,
+                              uint32_t width,
+                              uint32_t height,
+                              uint32_t arrayLayerCount,
+                              uint32_t mipLevelCount,
+                              wgpu::TextureUsage usage) {
+    wgpu::TextureDescriptor descriptor;
+    descriptor.dimension = wgpu::TextureDimension::e2D;
+    descriptor.size.width = width;
+    descriptor.size.height = height;
+    descriptor.size.depthOrArrayLayers = arrayLayerCount;
+    descriptor.sampleCount = 1;
+    descriptor.format = kDefaultFormat;
+    descriptor.mipLevelCount = mipLevelCount;
+    descriptor.usage = usage;
+    return device.CreateTexture(&descriptor);
+}
 
-    wgpu::Texture Create3DTexture(wgpu::Device device,
-                                  wgpu::Extent3D size,
-                                  uint32_t mipLevelCount,
-                                  wgpu::TextureUsage usage) {
-        wgpu::TextureDescriptor descriptor;
-        descriptor.dimension = wgpu::TextureDimension::e3D;
-        descriptor.size = size;
-        descriptor.sampleCount = 1;
-        descriptor.format = kDefaultFormat;
-        descriptor.mipLevelCount = mipLevelCount;
-        descriptor.usage = usage;
-        return device.CreateTexture(&descriptor);
-    }
+wgpu::Texture Create3DTexture(wgpu::Device device,
+                              wgpu::Extent3D size,
+                              uint32_t mipLevelCount,
+                              wgpu::TextureUsage usage) {
+    wgpu::TextureDescriptor descriptor;
+    descriptor.dimension = wgpu::TextureDimension::e3D;
+    descriptor.size = size;
+    descriptor.sampleCount = 1;
+    descriptor.format = kDefaultFormat;
+    descriptor.mipLevelCount = mipLevelCount;
+    descriptor.usage = usage;
+    return device.CreateTexture(&descriptor);
+}
 
-    wgpu::ShaderModule CreateDefaultVertexShaderModule(wgpu::Device device) {
-        return utils::CreateShaderModule(device, R"(
+wgpu::ShaderModule CreateDefaultVertexShaderModule(wgpu::Device device) {
+    return utils::CreateShaderModule(device, R"(
             struct VertexOut {
                 @location(0) texCoord : vec2<f32>,
                 @builtin(position) position : vec4<f32>,
@@ -90,7 +90,7 @@
                 return output;
             }
         )");
-    }
+}
 }  // anonymous namespace
 
 class TextureViewSamplingTest : public DawnTest {
diff --git a/src/dawn/tests/end2end/TextureZeroInitTests.cpp b/src/dawn/tests/end2end/TextureZeroInitTests.cpp
index 8a19943..aa30357 100644
--- a/src/dawn/tests/end2end/TextureZeroInitTests.cpp
+++ b/src/dawn/tests/end2end/TextureZeroInitTests.cpp
@@ -1721,9 +1721,7 @@
         return {wgpu::FeatureName::TextureCompressionBC};
     }
 
-    bool IsBCFormatSupported() const {
-        return mIsBCFormatSupported;
-    }
+    bool IsBCFormatSupported() const { return mIsBCFormatSupported; }
 
     // Copy the compressed texture data into the destination texture.
     void InitializeDataInCompressedTextureAndExpectLazyClear(
diff --git a/src/dawn/tests/end2end/VideoViewsTests.cpp b/src/dawn/tests/end2end/VideoViewsTests.cpp
index c195996..32666db 100644
--- a/src/dawn/tests/end2end/VideoViewsTests.cpp
+++ b/src/dawn/tests/end2end/VideoViewsTests.cpp
@@ -19,8 +19,7 @@
 #include "dawn/utils/WGPUHelpers.h"
 
 VideoViewsTestBackend::PlatformTexture::PlatformTexture(wgpu::Texture&& texture)
-    : wgpuTexture(texture) {
-}
+    : wgpuTexture(texture) {}
 VideoViewsTestBackend::PlatformTexture::~PlatformTexture() = default;
 
 VideoViewsTestBackend::~VideoViewsTestBackend() = default;
diff --git a/src/dawn/tests/end2end/VideoViewsTests.h b/src/dawn/tests/end2end/VideoViewsTests.h
index a97999a..e614fea 100644
--- a/src/dawn/tests/end2end/VideoViewsTests.h
+++ b/src/dawn/tests/end2end/VideoViewsTests.h
@@ -29,8 +29,7 @@
     virtual ~VideoViewsTestBackend();
 
     virtual void OnSetUp(WGPUDevice device) = 0;
-    virtual void OnTearDown() {
-    }
+    virtual void OnTearDown() {}
 
     class PlatformTexture {
       public:
diff --git a/src/dawn/tests/end2end/VideoViewsTests_gbm.cpp b/src/dawn/tests/end2end/VideoViewsTests_gbm.cpp
index 9116de1..9a45b6c 100644
--- a/src/dawn/tests/end2end/VideoViewsTests_gbm.cpp
+++ b/src/dawn/tests/end2end/VideoViewsTests_gbm.cpp
@@ -27,16 +27,15 @@
 // "linux-chromeos-rel"'s gbm.h is too old to compile, missing this change at least:
 // https://chromium-review.googlesource.com/c/chromiumos/platform/minigbm/+/1963001/10/gbm.h#244
 #ifndef MINIGBM
-#    define GBM_BO_USE_TEXTURING (1 << 5)
-#    define GBM_BO_USE_SW_WRITE_RARELY (1 << 12)
-#    define GBM_BO_USE_HW_VIDEO_DECODER (1 << 13)
+#define GBM_BO_USE_TEXTURING (1 << 5)
+#define GBM_BO_USE_SW_WRITE_RARELY (1 << 12)
+#define GBM_BO_USE_HW_VIDEO_DECODER (1 << 13)
 #endif
 
 class PlatformTextureGbm : public VideoViewsTestBackend::PlatformTexture {
   public:
     PlatformTextureGbm(wgpu::Texture&& texture, gbm_bo* gbmBo)
-        : PlatformTexture(std::move(texture)), mGbmBo(gbmBo) {
-    }
+        : PlatformTexture(std::move(texture)), mGbmBo(gbmBo) {}
     ~PlatformTextureGbm() override = default;
 
     // TODO(chromium:1258986): Add DISJOINT vkImage support for multi-plannar formats.
@@ -52,9 +51,7 @@
         return true;
     }
 
-    gbm_bo* GetGbmBo() {
-        return mGbmBo;
-    }
+    gbm_bo* GetGbmBo() { return mGbmBo; }
 
   private:
     gbm_bo* mGbmBo = nullptr;
@@ -67,9 +64,7 @@
         mGbmDevice = CreateGbmDevice();
     }
 
-    void OnTearDown() override {
-        gbm_device_destroy(mGbmDevice);
-    }
+    void OnTearDown() override { gbm_device_destroy(mGbmDevice); }
 
   private:
     gbm_device* CreateGbmDevice() {
diff --git a/src/dawn/tests/end2end/VideoViewsTests_mac.cpp b/src/dawn/tests/end2end/VideoViewsTests_mac.cpp
index 449eba0..c7f480e 100644
--- a/src/dawn/tests/end2end/VideoViewsTests_mac.cpp
+++ b/src/dawn/tests/end2end/VideoViewsTests_mac.cpp
@@ -26,11 +26,11 @@
 #include "dawn/native/MetalBackend.h"
 
 namespace {
-    void AddIntegerValue(CFMutableDictionaryRef dictionary, const CFStringRef key, int32_t value) {
-        CFNumberRef number(CFNumberCreate(nullptr, kCFNumberSInt32Type, &value));
-        CFDictionaryAddValue(dictionary, key, number);
-        CFRelease(number);
-    }
+void AddIntegerValue(CFMutableDictionaryRef dictionary, const CFStringRef key, int32_t value) {
+    CFNumberRef number(CFNumberCreate(nullptr, kCFNumberSInt32Type, &value));
+    CFDictionaryAddValue(dictionary, key, number);
+    CFRelease(number);
+}
 
 }  // anonymous namespace
 
@@ -40,13 +40,9 @@
         : PlatformTexture(std::move(texture)) {
         mIOSurface = AcquireCFRef<IOSurfaceRef>(iosurface);
     }
-    ~PlatformTextureIOSurface() override {
-        mIOSurface = nullptr;
-    }
+    ~PlatformTextureIOSurface() override { mIOSurface = nullptr; }
 
-    bool CanWrapAsWGPUTexture() override {
-        return true;
-    }
+    bool CanWrapAsWGPUTexture() override { return true; }
 
   private:
     CFRef<IOSurfaceRef> mIOSurface = nullptr;
@@ -54,9 +50,7 @@
 
 class VideoViewsTestBackendIOSurface : public VideoViewsTestBackend {
   public:
-    void OnSetUp(WGPUDevice device) override {
-        mWGPUDevice = device;
-    }
+    void OnSetUp(WGPUDevice device) override { mWGPUDevice = device; }
 
   private:
     OSType ToCVFormat(wgpu::TextureFormat format) {
@@ -173,8 +167,7 @@
     }
 
     void DestroyVideoTextureForTest(
-        std::unique_ptr<VideoViewsTestBackend::PlatformTexture>&& platformTexture) override {
-    }
+        std::unique_ptr<VideoViewsTestBackend::PlatformTexture>&& platformTexture) override {}
 
     WGPUDevice mWGPUDevice = nullptr;
 };
diff --git a/src/dawn/tests/end2end/VideoViewsTests_win.cpp b/src/dawn/tests/end2end/VideoViewsTests_win.cpp
index beba015..a402dee 100644
--- a/src/dawn/tests/end2end/VideoViewsTests_win.cpp
+++ b/src/dawn/tests/end2end/VideoViewsTests_win.cpp
@@ -29,13 +29,10 @@
 
 class PlatformTextureWin : public VideoViewsTestBackend::PlatformTexture {
   public:
-    explicit PlatformTextureWin(wgpu::Texture&& texture) : PlatformTexture(std::move(texture)) {
-    }
+    explicit PlatformTextureWin(wgpu::Texture&& texture) : PlatformTexture(std::move(texture)) {}
     ~PlatformTextureWin() override = default;
 
-    bool CanWrapAsWGPUTexture() override {
-        return true;
-    }
+    bool CanWrapAsWGPUTexture() override { return true; }
 };
 
 class VideoViewsTestBackendWin : public VideoViewsTestBackend {
@@ -173,8 +170,7 @@
     }
 
     void DestroyVideoTextureForTest(
-        std::unique_ptr<VideoViewsTestBackend::PlatformTexture>&& PlatformTexture) override {
-    }
+        std::unique_ptr<VideoViewsTestBackend::PlatformTexture>&& PlatformTexture) override {}
 
     WGPUDevice mWGPUDevice = nullptr;
     ComPtr<ID3D11Device> mD3d11Device;
diff --git a/src/dawn/tests/end2end/WindowSurfaceTests.cpp b/src/dawn/tests/end2end/WindowSurfaceTests.cpp
index 6e45523..f916605 100644
--- a/src/dawn/tests/end2end/WindowSurfaceTests.cpp
+++ b/src/dawn/tests/end2end/WindowSurfaceTests.cpp
@@ -25,17 +25,17 @@
 
 // Include windows.h before GLFW so GLFW's APIENTRY macro doesn't conflict with windows.h's.
 #if defined(DAWN_PLATFORM_WINDOWS)
-#    include "dawn/common/windows_with_undefs.h"
+#include "dawn/common/windows_with_undefs.h"
 #endif  // defined(DAWN_PLATFORM_WINDOWS)
 
 #include "GLFW/glfw3.h"
 
 #if defined(DAWN_USE_X11)
-#    include "dawn/common/xlib_with_undefs.h"
+#include "dawn/common/xlib_with_undefs.h"
 #endif  // defined(DAWN_USE_X11)
 
 #if defined(DAWN_ENABLE_BACKEND_METAL)
-#    include "dawn/utils/ObjCUtils.h"
+#include "dawn/utils/ObjCUtils.h"
 #endif  // defined(DAWN_ENABLE_BACKEND_METAL)
 
 #include "GLFW/glfw3native.h"
diff --git a/src/dawn/tests/end2end/mocks/CachingInterfaceMock.cpp b/src/dawn/tests/end2end/mocks/CachingInterfaceMock.cpp
index d07b18d..8467437 100644
--- a/src/dawn/tests/end2end/mocks/CachingInterfaceMock.cpp
+++ b/src/dawn/tests/end2end/mocks/CachingInterfaceMock.cpp
@@ -79,8 +79,7 @@
 }
 
 DawnCachingMockPlatform::DawnCachingMockPlatform(dawn::platform::CachingInterface* cachingInterface)
-    : mCachingInterface(cachingInterface) {
-}
+    : mCachingInterface(cachingInterface) {}
 
 dawn::platform::CachingInterface* DawnCachingMockPlatform::GetCachingInterface(
     const void* fingerprint,
diff --git a/src/dawn/tests/perf_tests/BufferUploadPerf.cpp b/src/dawn/tests/perf_tests/BufferUploadPerf.cpp
index 0331e47..8381729 100644
--- a/src/dawn/tests/perf_tests/BufferUploadPerf.cpp
+++ b/src/dawn/tests/perf_tests/BufferUploadPerf.cpp
@@ -19,68 +19,67 @@
 
 namespace {
 
-    constexpr unsigned int kNumIterations = 50;
+constexpr unsigned int kNumIterations = 50;
 
-    enum class UploadMethod {
-        WriteBuffer,
-        MappedAtCreation,
-    };
+enum class UploadMethod {
+    WriteBuffer,
+    MappedAtCreation,
+};
 
-    // Perf delta exists between ranges [0, 1MB] vs [1MB, MAX_SIZE).
-    // These are sample buffer sizes within each range.
-    enum class UploadSize {
-        BufferSize_1KB = 1 * 1024,
-        BufferSize_64KB = 64 * 1024,
-        BufferSize_1MB = 1 * 1024 * 1024,
+// Perf delta exists between ranges [0, 1MB] vs [1MB, MAX_SIZE).
+// These are sample buffer sizes within each range.
+enum class UploadSize {
+    BufferSize_1KB = 1 * 1024,
+    BufferSize_64KB = 64 * 1024,
+    BufferSize_1MB = 1 * 1024 * 1024,
 
-        BufferSize_4MB = 4 * 1024 * 1024,
-        BufferSize_16MB = 16 * 1024 * 1024,
-    };
+    BufferSize_4MB = 4 * 1024 * 1024,
+    BufferSize_16MB = 16 * 1024 * 1024,
+};
 
-    struct BufferUploadParams : AdapterTestParam {
-        BufferUploadParams(const AdapterTestParam& param,
-                           UploadMethod uploadMethod,
-                           UploadSize uploadSize)
-            : AdapterTestParam(param), uploadMethod(uploadMethod), uploadSize(uploadSize) {
-        }
+struct BufferUploadParams : AdapterTestParam {
+    BufferUploadParams(const AdapterTestParam& param,
+                       UploadMethod uploadMethod,
+                       UploadSize uploadSize)
+        : AdapterTestParam(param), uploadMethod(uploadMethod), uploadSize(uploadSize) {}
 
-        UploadMethod uploadMethod;
-        UploadSize uploadSize;
-    };
+    UploadMethod uploadMethod;
+    UploadSize uploadSize;
+};
 
-    std::ostream& operator<<(std::ostream& ostream, const BufferUploadParams& param) {
-        ostream << static_cast<const AdapterTestParam&>(param);
+std::ostream& operator<<(std::ostream& ostream, const BufferUploadParams& param) {
+    ostream << static_cast<const AdapterTestParam&>(param);
 
-        switch (param.uploadMethod) {
-            case UploadMethod::WriteBuffer:
-                ostream << "_WriteBuffer";
-                break;
-            case UploadMethod::MappedAtCreation:
-                ostream << "_MappedAtCreation";
-                break;
-        }
-
-        switch (param.uploadSize) {
-            case UploadSize::BufferSize_1KB:
-                ostream << "_BufferSize_1KB";
-                break;
-            case UploadSize::BufferSize_64KB:
-                ostream << "_BufferSize_64KB";
-                break;
-            case UploadSize::BufferSize_1MB:
-                ostream << "_BufferSize_1MB";
-                break;
-            case UploadSize::BufferSize_4MB:
-                ostream << "_BufferSize_4MB";
-                break;
-            case UploadSize::BufferSize_16MB:
-                ostream << "_BufferSize_16MB";
-                break;
-        }
-
-        return ostream;
+    switch (param.uploadMethod) {
+        case UploadMethod::WriteBuffer:
+            ostream << "_WriteBuffer";
+            break;
+        case UploadMethod::MappedAtCreation:
+            ostream << "_MappedAtCreation";
+            break;
     }
 
+    switch (param.uploadSize) {
+        case UploadSize::BufferSize_1KB:
+            ostream << "_BufferSize_1KB";
+            break;
+        case UploadSize::BufferSize_64KB:
+            ostream << "_BufferSize_64KB";
+            break;
+        case UploadSize::BufferSize_1MB:
+            ostream << "_BufferSize_1MB";
+            break;
+        case UploadSize::BufferSize_4MB:
+            ostream << "_BufferSize_4MB";
+            break;
+        case UploadSize::BufferSize_16MB:
+            ostream << "_BufferSize_16MB";
+            break;
+    }
+
+    return ostream;
+}
+
 }  // namespace
 
 // Test uploading |kBufferSize| bytes of data |kNumIterations| times.
@@ -88,8 +87,7 @@
   public:
     BufferUploadPerf()
         : DawnPerfTestWithParams(kNumIterations, 1),
-          data(static_cast<size_t>(GetParam().uploadSize)) {
-    }
+          data(static_cast<size_t>(GetParam().uploadSize)) {}
     ~BufferUploadPerf() override = default;
 
     void SetUp() override;
diff --git a/src/dawn/tests/perf_tests/DawnPerfTest.cpp b/src/dawn/tests/perf_tests/DawnPerfTest.cpp
index aece2b8..71d5eec 100644
--- a/src/dawn/tests/perf_tests/DawnPerfTest.cpp
+++ b/src/dawn/tests/perf_tests/DawnPerfTest.cpp
@@ -27,47 +27,47 @@
 
 namespace {
 
-    DawnPerfTestEnvironment* gTestEnv = nullptr;
+DawnPerfTestEnvironment* gTestEnv = nullptr;
 
-    void DumpTraceEventsToJSONFile(
-        const std::vector<DawnPerfTestPlatform::TraceEvent>& traceEventBuffer,
-        const char* traceFile) {
-        std::ofstream outFile;
-        outFile.open(traceFile, std::ios_base::app);
+void DumpTraceEventsToJSONFile(
+    const std::vector<DawnPerfTestPlatform::TraceEvent>& traceEventBuffer,
+    const char* traceFile) {
+    std::ofstream outFile;
+    outFile.open(traceFile, std::ios_base::app);
 
-        for (const DawnPerfTestPlatform::TraceEvent& traceEvent : traceEventBuffer) {
-            const char* category = nullptr;
-            switch (traceEvent.category) {
-                case dawn::platform::TraceCategory::General:
-                    category = "general";
-                    break;
-                case dawn::platform::TraceCategory::Validation:
-                    category = "validation";
-                    break;
-                case dawn::platform::TraceCategory::Recording:
-                    category = "recording";
-                    break;
-                case dawn::platform::TraceCategory::GPUWork:
-                    category = "gpu";
-                    break;
-                default:
-                    UNREACHABLE();
-            }
-
-            uint64_t microseconds = static_cast<uint64_t>(traceEvent.timestamp * 1000.0 * 1000.0);
-
-            outFile << ", { "
-                    << "\"name\": \"" << traceEvent.name << "\", "
-                    << "\"cat\": \"" << category << "\", "
-                    << "\"ph\": \"" << traceEvent.phase << "\", "
-                    << "\"id\": " << traceEvent.id << ", "
-                    << "\"tid\": " << traceEvent.threadId << ", "
-                    << "\"ts\": " << microseconds << ", "
-                    << "\"pid\": \"Dawn\""
-                    << " }";
+    for (const DawnPerfTestPlatform::TraceEvent& traceEvent : traceEventBuffer) {
+        const char* category = nullptr;
+        switch (traceEvent.category) {
+            case dawn::platform::TraceCategory::General:
+                category = "general";
+                break;
+            case dawn::platform::TraceCategory::Validation:
+                category = "validation";
+                break;
+            case dawn::platform::TraceCategory::Recording:
+                category = "recording";
+                break;
+            case dawn::platform::TraceCategory::GPUWork:
+                category = "gpu";
+                break;
+            default:
+                UNREACHABLE();
         }
-        outFile.close();
+
+        uint64_t microseconds = static_cast<uint64_t>(traceEvent.timestamp * 1000.0 * 1000.0);
+
+        outFile << ", { "
+                << "\"name\": \"" << traceEvent.name << "\", "
+                << "\"cat\": \"" << category << "\", "
+                << "\"ph\": \"" << traceEvent.phase << "\", "
+                << "\"id\": " << traceEvent.id << ", "
+                << "\"tid\": " << traceEvent.threadId << ", "
+                << "\"ts\": " << microseconds << ", "
+                << "\"pid\": \"Dawn\""
+                << " }";
     }
+    outFile.close();
+}
 
 }  // namespace
 
@@ -179,8 +179,7 @@
     : mTest(test),
       mIterationsPerStep(iterationsPerStep),
       mMaxStepsInFlight(maxStepsInFlight),
-      mTimer(utils::CreateTimer()) {
-}
+      mTimer(utils::CreateTimer()) {}
 
 DawnPerfTestBase::~DawnPerfTestBase() = default;
 
diff --git a/src/dawn/tests/perf_tests/DawnPerfTest.h b/src/dawn/tests/perf_tests/DawnPerfTest.h
index 7b70c1b..20d2a2f 100644
--- a/src/dawn/tests/perf_tests/DawnPerfTest.h
+++ b/src/dawn/tests/perf_tests/DawnPerfTest.h
@@ -21,7 +21,7 @@
 #include "dawn/tests/DawnTest.h"
 
 namespace utils {
-    class Timer;
+class Timer;
 }
 
 class DawnPerfTestPlatform;
@@ -116,8 +116,7 @@
   protected:
     DawnPerfTestWithParams(unsigned int iterationsPerStep, unsigned int maxStepsInFlight)
         : DawnTestWithParams<Params>(),
-          DawnPerfTestBase(this, iterationsPerStep, maxStepsInFlight) {
-    }
+          DawnPerfTestBase(this, iterationsPerStep, maxStepsInFlight) {}
     void SetUp() override {
         DawnTestWithParams<Params>::SetUp();
 
diff --git a/src/dawn/tests/perf_tests/DawnPerfTestPlatform.cpp b/src/dawn/tests/perf_tests/DawnPerfTestPlatform.cpp
index ae5307a..32e77fd 100644
--- a/src/dawn/tests/perf_tests/DawnPerfTestPlatform.cpp
+++ b/src/dawn/tests/perf_tests/DawnPerfTestPlatform.cpp
@@ -24,28 +24,27 @@
 #include "dawn/utils/Timer.h"
 namespace {
 
-    struct TraceCategoryInfo {
-        unsigned char enabled;
-        dawn::platform::TraceCategory category;
-    };
+struct TraceCategoryInfo {
+    unsigned char enabled;
+    dawn::platform::TraceCategory category;
+};
 
-    constexpr TraceCategoryInfo gTraceCategories[4] = {
-        {1, dawn::platform::TraceCategory::General},
-        {1, dawn::platform::TraceCategory::Validation},
-        {1, dawn::platform::TraceCategory::Recording},
-        {1, dawn::platform::TraceCategory::GPUWork},
-    };
+constexpr TraceCategoryInfo gTraceCategories[4] = {
+    {1, dawn::platform::TraceCategory::General},
+    {1, dawn::platform::TraceCategory::Validation},
+    {1, dawn::platform::TraceCategory::Recording},
+    {1, dawn::platform::TraceCategory::GPUWork},
+};
 
-    static_assert(static_cast<uint32_t>(dawn::platform::TraceCategory::General) == 0);
-    static_assert(static_cast<uint32_t>(dawn::platform::TraceCategory::Validation) == 1);
-    static_assert(static_cast<uint32_t>(dawn::platform::TraceCategory::Recording) == 2);
-    static_assert(static_cast<uint32_t>(dawn::platform::TraceCategory::GPUWork) == 3);
+static_assert(static_cast<uint32_t>(dawn::platform::TraceCategory::General) == 0);
+static_assert(static_cast<uint32_t>(dawn::platform::TraceCategory::Validation) == 1);
+static_assert(static_cast<uint32_t>(dawn::platform::TraceCategory::Recording) == 2);
+static_assert(static_cast<uint32_t>(dawn::platform::TraceCategory::GPUWork) == 3);
 
 }  // anonymous namespace
 
 DawnPerfTestPlatform::DawnPerfTestPlatform()
-    : dawn::platform::Platform(), mTimer(utils::CreateTimer()) {
-}
+    : dawn::platform::Platform(), mTimer(utils::CreateTimer()) {}
 
 DawnPerfTestPlatform::~DawnPerfTestPlatform() = default;
 
diff --git a/src/dawn/tests/perf_tests/DawnPerfTestPlatform.h b/src/dawn/tests/perf_tests/DawnPerfTestPlatform.h
index 996d2bb..6c3a95e 100644
--- a/src/dawn/tests/perf_tests/DawnPerfTestPlatform.h
+++ b/src/dawn/tests/perf_tests/DawnPerfTestPlatform.h
@@ -25,7 +25,7 @@
 #include "dawn/platform/DawnPlatform.h"
 
 namespace utils {
-    class Timer;
+class Timer;
 }
 
 class DawnPerfTestPlatform : public dawn::platform::Platform {
@@ -34,15 +34,17 @@
     // See https://docs.google.com/document/d/1CvAClvFfyA5R-PhYUmn5OOQtYMH4h6I0nSsKchNAySU
     // Only a subset of the properties are implemented.
     struct TraceEvent final {
-        TraceEvent() {
-        }
+        TraceEvent() {}
         TraceEvent(char phaseIn,
                    dawn::platform::TraceCategory categoryIn,
                    const char* nameIn,
                    uint64_t idIn,
                    double timestampIn)
-            : phase(phaseIn), category(categoryIn), name(nameIn), id(idIn), timestamp(timestampIn) {
-        }
+            : phase(phaseIn),
+              category(categoryIn),
+              name(nameIn),
+              id(idIn),
+              timestamp(timestampIn) {}
 
         char phase = 0;
         dawn::platform::TraceCategory category;
diff --git a/src/dawn/tests/perf_tests/DrawCallPerf.cpp b/src/dawn/tests/perf_tests/DrawCallPerf.cpp
index 2dd0b93..7b2cc02 100644
--- a/src/dawn/tests/perf_tests/DrawCallPerf.cpp
+++ b/src/dawn/tests/perf_tests/DrawCallPerf.cpp
@@ -24,23 +24,23 @@
 
 namespace {
 
-    constexpr unsigned int kNumDraws = 2000;
+constexpr unsigned int kNumDraws = 2000;
 
-    constexpr uint32_t kTextureSize = 64;
-    constexpr size_t kUniformSize = 3 * sizeof(float);
+constexpr uint32_t kTextureSize = 64;
+constexpr size_t kUniformSize = 3 * sizeof(float);
 
-    constexpr float kVertexData[12] = {
-        0.0f, 0.5f, 0.0f, 1.0f, -0.5f, -0.5f, 0.0f, 1.0f, 0.5f, -0.5f, 0.0f, 1.0f,
-    };
+constexpr float kVertexData[12] = {
+    0.0f, 0.5f, 0.0f, 1.0f, -0.5f, -0.5f, 0.0f, 1.0f, 0.5f, -0.5f, 0.0f, 1.0f,
+};
 
-    constexpr char kVertexShader[] = R"(
+constexpr char kVertexShader[] = R"(
         @stage(vertex) fn main(
             @location(0) pos : vec4<f32>
         ) -> @builtin(position) vec4<f32> {
             return pos;
         })";
 
-    constexpr char kFragmentShaderA[] = R"(
+constexpr char kFragmentShaderA[] = R"(
         struct Uniforms {
             color : vec3<f32>
         }
@@ -49,7 +49,7 @@
             return vec4<f32>(uniforms.color * (1.0 / 5000.0), 1.0);
         })";
 
-    constexpr char kFragmentShaderB[] = R"(
+constexpr char kFragmentShaderB[] = R"(
         struct Constants {
             color : vec3<f32>
         }
@@ -63,149 +63,147 @@
             return vec4<f32>((constants.color + uniforms.color) * (1.0 / 5000.0), 1.0);
         })";
 
-    enum class Pipeline {
-        Static,     // Keep the same pipeline for all draws.
-        Redundant,  // Use the same pipeline, but redundantly set it.
-        Dynamic,    // Change the pipeline between draws.
+enum class Pipeline {
+    Static,     // Keep the same pipeline for all draws.
+    Redundant,  // Use the same pipeline, but redundantly set it.
+    Dynamic,    // Change the pipeline between draws.
+};
+
+enum class UniformData {
+    Static,   // Don't update per-draw uniform data.
+    Dynamic,  // Update the per-draw uniform data once per frame.
+};
+
+enum class BindGroup {
+    NoChange,   // Use one bind group for all draws.
+    Redundant,  // Use the same bind group, but redundantly set it.
+    NoReuse,    // Create a new bind group every time.
+    Multiple,   // Use multiple static bind groups.
+    Dynamic,    // Use bind groups with dynamic offsets.
+};
+
+enum class VertexBuffer {
+    NoChange,  // Use one vertex buffer for all draws.
+    Multiple,  // Use multiple static vertex buffers.
+    Dynamic,   // Switch vertex buffers between draws.
+};
+
+enum class RenderBundle {
+    No,   // Record commands in a render pass
+    Yes,  // Record commands in a render bundle
+};
+
+struct DrawCallParam {
+    Pipeline pipelineType;
+    VertexBuffer vertexBufferType;
+    BindGroup bindGroupType;
+    UniformData uniformDataType;
+    RenderBundle withRenderBundle;
+};
+
+using DrawCallParamTuple = std::tuple<Pipeline, VertexBuffer, BindGroup, UniformData, RenderBundle>;
+
+template <typename T>
+unsigned int AssignParam(T& lhs, T rhs) {
+    lhs = rhs;
+    return 0u;
+}
+
+// This helper function allows creating a DrawCallParam from a list of arguments
+// without specifying all of the members. Provided members can be passed once in an arbitrary
+// order. Unspecified members default to:
+//  - Pipeline::Static
+//  - VertexBuffer::NoChange
+//  - BindGroup::NoChange
+//  - UniformData::Static
+//  - RenderBundle::No
+template <typename... Ts>
+DrawCallParam MakeParam(Ts... args) {
+    // Baseline param
+    DrawCallParamTuple paramTuple{Pipeline::Static, VertexBuffer::NoChange, BindGroup::NoChange,
+                                  UniformData::Static, RenderBundle::No};
+
+    unsigned int unused[] = {
+        0,  // Avoid making a 0-sized array.
+        AssignParam(std::get<Ts>(paramTuple), args)...,
     };
+    DAWN_UNUSED(unused);
 
-    enum class UniformData {
-        Static,   // Don't update per-draw uniform data.
-        Dynamic,  // Update the per-draw uniform data once per frame.
+    return DrawCallParam{
+        std::get<Pipeline>(paramTuple),     std::get<VertexBuffer>(paramTuple),
+        std::get<BindGroup>(paramTuple),    std::get<UniformData>(paramTuple),
+        std::get<RenderBundle>(paramTuple),
     };
+}
 
-    enum class BindGroup {
-        NoChange,   // Use one bind group for all draws.
-        Redundant,  // Use the same bind group, but redundantly set it.
-        NoReuse,    // Create a new bind group every time.
-        Multiple,   // Use multiple static bind groups.
-        Dynamic,    // Use bind groups with dynamic offsets.
-    };
+struct DrawCallParamForTest : AdapterTestParam {
+    DrawCallParamForTest(const AdapterTestParam& backendParam, DrawCallParam param)
+        : AdapterTestParam(backendParam), param(param) {}
+    DrawCallParam param;
+};
 
-    enum class VertexBuffer {
-        NoChange,  // Use one vertex buffer for all draws.
-        Multiple,  // Use multiple static vertex buffers.
-        Dynamic,   // Switch vertex buffers between draws.
-    };
+std::ostream& operator<<(std::ostream& ostream, const DrawCallParamForTest& testParams) {
+    ostream << static_cast<const AdapterTestParam&>(testParams);
 
-    enum class RenderBundle {
-        No,   // Record commands in a render pass
-        Yes,  // Record commands in a render bundle
-    };
+    const DrawCallParam& param = testParams.param;
 
-    struct DrawCallParam {
-        Pipeline pipelineType;
-        VertexBuffer vertexBufferType;
-        BindGroup bindGroupType;
-        UniformData uniformDataType;
-        RenderBundle withRenderBundle;
-    };
-
-    using DrawCallParamTuple =
-        std::tuple<Pipeline, VertexBuffer, BindGroup, UniformData, RenderBundle>;
-
-    template <typename T>
-    unsigned int AssignParam(T& lhs, T rhs) {
-        lhs = rhs;
-        return 0u;
+    switch (param.pipelineType) {
+        case Pipeline::Static:
+            break;
+        case Pipeline::Redundant:
+            ostream << "_RedundantPipeline";
+            break;
+        case Pipeline::Dynamic:
+            ostream << "_DynamicPipeline";
+            break;
     }
 
-    // This helper function allows creating a DrawCallParam from a list of arguments
-    // without specifying all of the members. Provided members can be passed once in an arbitrary
-    // order. Unspecified members default to:
-    //  - Pipeline::Static
-    //  - VertexBuffer::NoChange
-    //  - BindGroup::NoChange
-    //  - UniformData::Static
-    //  - RenderBundle::No
-    template <typename... Ts>
-    DrawCallParam MakeParam(Ts... args) {
-        // Baseline param
-        DrawCallParamTuple paramTuple{Pipeline::Static, VertexBuffer::NoChange, BindGroup::NoChange,
-                                      UniformData::Static, RenderBundle::No};
-
-        unsigned int unused[] = {
-            0,  // Avoid making a 0-sized array.
-            AssignParam(std::get<Ts>(paramTuple), args)...,
-        };
-        DAWN_UNUSED(unused);
-
-        return DrawCallParam{
-            std::get<Pipeline>(paramTuple),     std::get<VertexBuffer>(paramTuple),
-            std::get<BindGroup>(paramTuple),    std::get<UniformData>(paramTuple),
-            std::get<RenderBundle>(paramTuple),
-        };
+    switch (param.vertexBufferType) {
+        case VertexBuffer::NoChange:
+            break;
+        case VertexBuffer::Multiple:
+            ostream << "_MultipleVertexBuffers";
+            break;
+        case VertexBuffer::Dynamic:
+            ostream << "_DynamicVertexBuffer";
     }
 
-    struct DrawCallParamForTest : AdapterTestParam {
-        DrawCallParamForTest(const AdapterTestParam& backendParam, DrawCallParam param)
-            : AdapterTestParam(backendParam), param(param) {
-        }
-        DrawCallParam param;
-    };
-
-    std::ostream& operator<<(std::ostream& ostream, const DrawCallParamForTest& testParams) {
-        ostream << static_cast<const AdapterTestParam&>(testParams);
-
-        const DrawCallParam& param = testParams.param;
-
-        switch (param.pipelineType) {
-            case Pipeline::Static:
-                break;
-            case Pipeline::Redundant:
-                ostream << "_RedundantPipeline";
-                break;
-            case Pipeline::Dynamic:
-                ostream << "_DynamicPipeline";
-                break;
-        }
-
-        switch (param.vertexBufferType) {
-            case VertexBuffer::NoChange:
-                break;
-            case VertexBuffer::Multiple:
-                ostream << "_MultipleVertexBuffers";
-                break;
-            case VertexBuffer::Dynamic:
-                ostream << "_DynamicVertexBuffer";
-        }
-
-        switch (param.bindGroupType) {
-            case BindGroup::NoChange:
-                break;
-            case BindGroup::Redundant:
-                ostream << "_RedundantBindGroups";
-                break;
-            case BindGroup::NoReuse:
-                ostream << "_NoReuseBindGroups";
-                break;
-            case BindGroup::Multiple:
-                ostream << "_MultipleBindGroups";
-                break;
-            case BindGroup::Dynamic:
-                ostream << "_DynamicBindGroup";
-                break;
-        }
-
-        switch (param.uniformDataType) {
-            case UniformData::Static:
-                break;
-            case UniformData::Dynamic:
-                ostream << "_DynamicData";
-                break;
-        }
-
-        switch (param.withRenderBundle) {
-            case RenderBundle::No:
-                break;
-            case RenderBundle::Yes:
-                ostream << "_RenderBundle";
-                break;
-        }
-
-        return ostream;
+    switch (param.bindGroupType) {
+        case BindGroup::NoChange:
+            break;
+        case BindGroup::Redundant:
+            ostream << "_RedundantBindGroups";
+            break;
+        case BindGroup::NoReuse:
+            ostream << "_NoReuseBindGroups";
+            break;
+        case BindGroup::Multiple:
+            ostream << "_MultipleBindGroups";
+            break;
+        case BindGroup::Dynamic:
+            ostream << "_DynamicBindGroup";
+            break;
     }
 
+    switch (param.uniformDataType) {
+        case UniformData::Static:
+            break;
+        case UniformData::Dynamic:
+            ostream << "_DynamicData";
+            break;
+    }
+
+    switch (param.withRenderBundle) {
+        case RenderBundle::No:
+            break;
+        case RenderBundle::Yes:
+            ostream << "_RenderBundle";
+            break;
+    }
+
+    return ostream;
+}
+
 }  // anonymous namespace
 
 // DrawCallPerf is an uber-benchmark with supports many parameterizations.
@@ -224,16 +222,13 @@
 //     the efficiency of resource transitions.
 class DrawCallPerf : public DawnPerfTestWithParams<DrawCallParamForTest> {
   public:
-    DrawCallPerf() : DawnPerfTestWithParams(kNumDraws, 3) {
-    }
+    DrawCallPerf() : DawnPerfTestWithParams(kNumDraws, 3) {}
     ~DrawCallPerf() override = default;
 
     void SetUp() override;
 
   protected:
-    DrawCallParam GetParam() const {
-        return DawnPerfTestWithParams::GetParam().param;
-    }
+    DrawCallParam GetParam() const { return DawnPerfTestWithParams::GetParam().param; }
 
     template <typename Encoder>
     void RecordRenderCommands(Encoder encoder);
diff --git a/src/dawn/tests/perf_tests/ShaderRobustnessPerf.cpp b/src/dawn/tests/perf_tests/ShaderRobustnessPerf.cpp
index 4b99de1..b6cc974 100644
--- a/src/dawn/tests/perf_tests/ShaderRobustnessPerf.cpp
+++ b/src/dawn/tests/perf_tests/ShaderRobustnessPerf.cpp
@@ -19,9 +19,9 @@
 #include "dawn/utils/WGPUHelpers.h"
 
 namespace {
-    constexpr uint32_t kTileSize = 32u;
+constexpr uint32_t kTileSize = 32u;
 
-    const std::string& kMatMulFloatHeader = R"(
+const std::string& kMatMulFloatHeader = R"(
         struct Uniforms {
             dimAOuter : u32,
             dimInner : u32,
@@ -68,13 +68,13 @@
         let TileBOuter : u32 = 32u;
         let TileInner : u32 = 32u;)";
 
-    const std::string& kMatMulFloatSharedArray1D = R"(
+const std::string& kMatMulFloatSharedArray1D = R"(
         var<workgroup> mm_Asub : array<f32, 1024>;
         var<workgroup> mm_Bsub : array<f32, 1024>;)";
-    const std::string& kMatMulFloatSharedArray2D = R"(
+const std::string& kMatMulFloatSharedArray2D = R"(
         var<workgroup> mm_Asub : array<array<f32, 32>, 32>;
         var<workgroup> mm_Bsub : array<array<f32, 32>, 32>;)";
-    const std::string& kMatMulFloatBodyPart1 = R"(
+const std::string& kMatMulFloatBodyPart1 = R"(
         @stage(compute) @workgroup_size(8, 8, 1)
         fn main(@builtin(local_invocation_id) local_id : vec3<u32>,
                 @builtin(global_invocation_id) global_id  : vec3<u32>) {
@@ -109,7 +109,7 @@
                 for (var innerCol : u32 = 0u; innerCol < ColPerThreadA; innerCol = innerCol + 1u) {
                     let inputRow : u32 = tileRow + innerRow;
                     let inputCol : u32 = tileColA + innerCol;)";
-    const std::string& kMatMulFloatBodyPart2Array1D = R"(
+const std::string& kMatMulFloatBodyPart2Array1D = R"(
                     let index : u32 = inputRow * TileInner + inputCol;
                     mm_Asub[index] = mm_readA(globalRow + innerRow, t * TileInner + inputCol);
                 }
@@ -135,7 +135,7 @@
 
                     for (var innerRow : u32 = 0u; innerRow < RowPerThread; innerRow = innerRow + 1u) {
                         ACached = mm_Asub[(tileRow + innerRow) * TileInner + k];)";
-    const std::string& kMatMulFloatBodyPart2Array2D = R"(
+const std::string& kMatMulFloatBodyPart2Array2D = R"(
                     mm_Asub[inputRow][inputCol] = mm_readA(globalRow + innerRow, t * TileInner + inputCol);
                 }
                 }
@@ -159,7 +159,7 @@
 
                     for (var innerRow : u32 = 0u; innerRow < RowPerThread; innerRow = innerRow + 1u) {
                         ACached = mm_Asub[tileRow + innerRow][k];)";
-    const std::string& kMatMulFloatBodyPart3 = R"(
+const std::string& kMatMulFloatBodyPart3 = R"(
                         for (var innerCol : u32 = 0u; innerCol < ColPerThread; innerCol = innerCol + 1u) {
                             let index : u32 = innerRow * ColPerThread + innerCol;
                             acc[index] = acc[index] + ACached * BCached[innerCol];
@@ -179,16 +179,16 @@
             }
             }
         })";
-    const std::string& kMatMulFloatOneDimensionalSharedArray =
-        kMatMulFloatHeader + kMatMulFloatSharedArray1D + kMatMulFloatBodyPart1 +
-        kMatMulFloatBodyPart2Array1D + kMatMulFloatBodyPart3;
+const std::string& kMatMulFloatOneDimensionalSharedArray =
+    kMatMulFloatHeader + kMatMulFloatSharedArray1D + kMatMulFloatBodyPart1 +
+    kMatMulFloatBodyPart2Array1D + kMatMulFloatBodyPart3;
 
-    const std::string& kMatMulFloatTwoDimensionalSharedArray =
-        kMatMulFloatHeader + kMatMulFloatSharedArray2D + kMatMulFloatBodyPart1 +
-        kMatMulFloatBodyPart2Array2D + kMatMulFloatBodyPart3;
+const std::string& kMatMulFloatTwoDimensionalSharedArray =
+    kMatMulFloatHeader + kMatMulFloatSharedArray2D + kMatMulFloatBodyPart1 +
+    kMatMulFloatBodyPart2Array2D + kMatMulFloatBodyPart3;
 
-    // The vec4 version requires that dimInner and dimBOuter are divisible by 4.
-    const std::string& kMatMulVec4Header = R"(
+// The vec4 version requires that dimInner and dimBOuter are divisible by 4.
+const std::string& kMatMulVec4Header = R"(
         struct Uniforms {
             dimAOuter : u32,
             dimInner : u32,
@@ -233,13 +233,13 @@
         let ColPerThread : u32 = 4u;
         let TileOuter : u32 = 32u;
         let TileInner : u32 = 32u;)";
-    const std::string& kMatMulVec4SharedArray1D = R"(
+const std::string& kMatMulVec4SharedArray1D = R"(
         var<workgroup> mm_Asub : array<vec4<f32>, 256>;
         var<workgroup> mm_Bsub : array<vec4<f32>, 256>;)";
-    const std::string& kMatMulVec4SharedArray2D = R"(
+const std::string& kMatMulVec4SharedArray2D = R"(
         var<workgroup> mm_Asub : array<array<vec4<f32>, 8>, 32>;
         var<workgroup> mm_Bsub : array<array<vec4<f32>, 8>, 32>;)";
-    const std::string& kMatMulVec4BodyPart1 = R"(
+const std::string& kMatMulVec4BodyPart1 = R"(
         @stage(compute) @workgroup_size(8, 8, 1)
         fn main(@builtin(local_invocation_id) local_id : vec3<u32>,
                 @builtin(global_invocation_id) global_id  : vec3<u32>) {
@@ -272,7 +272,7 @@
                 for (var innerRow : u32 = 0u; innerRow < RowPerThread; innerRow = innerRow + 1u) {
                     let inputRow : u32 = tileRow + innerRow;
                     let inputCol : u32 = tileCol;)";
-    const std::string& kMatMulVec4BodyPart2Array1D = R"(
+const std::string& kMatMulVec4BodyPart2Array1D = R"(
                     let index : u32 = inputRow * TileInner / ColPerThread + inputCol;
                     mm_Asub[index] = mm_readA(globalRow + innerRow, globalColA);
                 }
@@ -297,7 +297,7 @@
 
                     for (var i : u32 = 0u; i < RowPerThread; i = i + 1u) {
                         ACached = mm_Asub[(tileRow + i) * (TileInner / ColPerThread) + k];)";
-    const std::string& kMatMulVec4BodyPart2Array2D = R"(
+const std::string& kMatMulVec4BodyPart2Array2D = R"(
                     mm_Asub[inputRow][inputCol] = mm_readA(globalRow + innerRow, globalColA);
                 }
                 globalColA = globalColA + TileInner / ColPerThread;
@@ -320,7 +320,7 @@
 
                     for (var i : u32 = 0u; i < RowPerThread; i = i + 1u) {
                         ACached = mm_Asub[tileRow + i][k];)";
-    const std::string& kMatMulVec4BodyPart3 = R"(
+const std::string& kMatMulVec4BodyPart3 = R"(
                         acc[i] = BCached[0] * ACached.x + acc[i];
                         acc[i] = BCached[1] * ACached.y + acc[i];
                         acc[i] = BCached[2] * ACached.z + acc[i];
@@ -338,45 +338,45 @@
             }
         })";
 
-    const std::string& kMatMulVec4OneDimensionalSharedArray =
-        kMatMulVec4Header + kMatMulVec4SharedArray1D + kMatMulVec4BodyPart1 +
-        kMatMulVec4BodyPart2Array1D + kMatMulVec4BodyPart3;
+const std::string& kMatMulVec4OneDimensionalSharedArray =
+    kMatMulVec4Header + kMatMulVec4SharedArray1D + kMatMulVec4BodyPart1 +
+    kMatMulVec4BodyPart2Array1D + kMatMulVec4BodyPart3;
 
-    const std::string& kMatMulVec4TwoDimensionalSharedArray =
-        kMatMulVec4Header + kMatMulVec4SharedArray2D + kMatMulVec4BodyPart1 +
-        kMatMulVec4BodyPart2Array2D + kMatMulVec4BodyPart3;
+const std::string& kMatMulVec4TwoDimensionalSharedArray =
+    kMatMulVec4Header + kMatMulVec4SharedArray2D + kMatMulVec4BodyPart1 +
+    kMatMulVec4BodyPart2Array2D + kMatMulVec4BodyPart3;
 
-    constexpr unsigned int kNumIterations = 50;
+constexpr unsigned int kNumIterations = 50;
 
-    enum class MatMulMethod {
-        MatMulFloatOneDimSharedArray,
-        MatMulFloatTwoDimSharedArray,
-        MatMulVec4OneDimSharedArray,
-        MatMulVec4TwoDimSharedArray
-    };
+enum class MatMulMethod {
+    MatMulFloatOneDimSharedArray,
+    MatMulFloatTwoDimSharedArray,
+    MatMulVec4OneDimSharedArray,
+    MatMulVec4TwoDimSharedArray
+};
 
-    std::ostream& operator<<(std::ostream& ostream, const MatMulMethod& matMulMethod) {
-        switch (matMulMethod) {
-            case MatMulMethod::MatMulFloatOneDimSharedArray:
-                ostream << "MatMulFloatOneDimSharedArray";
-                break;
-            case MatMulMethod::MatMulFloatTwoDimSharedArray:
-                ostream << "MatMulFloatTwoDimSharedArray";
-                break;
-            case MatMulMethod::MatMulVec4OneDimSharedArray:
-                ostream << "MatMulVec4OneDimSharedArray";
-                break;
-            case MatMulMethod::MatMulVec4TwoDimSharedArray:
-                ostream << "MatMulVec4TwoDimSharedArray";
-                break;
-        }
-        return ostream;
+std::ostream& operator<<(std::ostream& ostream, const MatMulMethod& matMulMethod) {
+    switch (matMulMethod) {
+        case MatMulMethod::MatMulFloatOneDimSharedArray:
+            ostream << "MatMulFloatOneDimSharedArray";
+            break;
+        case MatMulMethod::MatMulFloatTwoDimSharedArray:
+            ostream << "MatMulFloatTwoDimSharedArray";
+            break;
+        case MatMulMethod::MatMulVec4OneDimSharedArray:
+            ostream << "MatMulVec4OneDimSharedArray";
+            break;
+        case MatMulMethod::MatMulVec4TwoDimSharedArray:
+            ostream << "MatMulVec4TwoDimSharedArray";
+            break;
     }
+    return ostream;
+}
 
-    using DimAOuter = uint32_t;
-    using DimInner = uint32_t;
-    using DimBOuter = uint32_t;
-    DAWN_TEST_PARAM_STRUCT(ShaderRobustnessParams, MatMulMethod, DimAOuter, DimInner, DimBOuter);
+using DimAOuter = uint32_t;
+using DimInner = uint32_t;
+using DimBOuter = uint32_t;
+DAWN_TEST_PARAM_STRUCT(ShaderRobustnessParams, MatMulMethod, DimAOuter, DimInner, DimBOuter);
 
 }  // namespace
 
@@ -388,8 +388,7 @@
         : DawnPerfTestWithParams(kNumIterations, 1),
           mDimAOuter(GetParam().mDimAOuter),
           mDimInner(GetParam().mDimInner),
-          mDimBOuter(GetParam().mDimBOuter) {
-    }
+          mDimBOuter(GetParam().mDimBOuter) {}
     ~ShaderRobustnessPerf() override = default;
 
     void SetUp() override;
diff --git a/src/dawn/tests/perf_tests/SubresourceTrackingPerf.cpp b/src/dawn/tests/perf_tests/SubresourceTrackingPerf.cpp
index b70c68b..33ae07e 100644
--- a/src/dawn/tests/perf_tests/SubresourceTrackingPerf.cpp
+++ b/src/dawn/tests/perf_tests/SubresourceTrackingPerf.cpp
@@ -23,8 +23,7 @@
                               uint32_t mipLevelCountIn)
         : AdapterTestParam(param),
           arrayLayerCount(arrayLayerCountIn),
-          mipLevelCount(mipLevelCountIn) {
-    }
+          mipLevelCount(mipLevelCountIn) {}
     uint32_t arrayLayerCount;
     uint32_t mipLevelCount;
 };
@@ -44,8 +43,7 @@
   public:
     static constexpr unsigned int kNumIterations = 50;
 
-    SubresourceTrackingPerf() : DawnPerfTestWithParams(kNumIterations, 1) {
-    }
+    SubresourceTrackingPerf() : DawnPerfTestWithParams(kNumIterations, 1) {}
     ~SubresourceTrackingPerf() override = default;
 
     void SetUp() override {
diff --git a/src/dawn/tests/unittests/AsyncTaskTests.cpp b/src/dawn/tests/unittests/AsyncTaskTests.cpp
index 7ebde3e..a61baa9 100644
--- a/src/dawn/tests/unittests/AsyncTaskTests.cpp
+++ b/src/dawn/tests/unittests/AsyncTaskTests.cpp
@@ -29,38 +29,38 @@
 
 namespace {
 
-    struct SimpleTaskResult {
-        uint32_t id;
-    };
+struct SimpleTaskResult {
+    uint32_t id;
+};
 
-    // A thread-safe queue that stores the task results.
-    class ConcurrentTaskResultQueue : public NonCopyable {
-      public:
-        void AddResult(std::unique_ptr<SimpleTaskResult> result) {
-            std::lock_guard<std::mutex> lock(mMutex);
-            mTaskResults.push_back(std::move(result));
-        }
-
-        std::vector<std::unique_ptr<SimpleTaskResult>> GetAllResults() {
-            std::vector<std::unique_ptr<SimpleTaskResult>> outputResults;
-            {
-                std::lock_guard<std::mutex> lock(mMutex);
-                outputResults.swap(mTaskResults);
-            }
-            return outputResults;
-        }
-
-      private:
-        std::mutex mMutex;
-        std::vector<std::unique_ptr<SimpleTaskResult>> mTaskResults;
-    };
-
-    void DoTask(ConcurrentTaskResultQueue* resultQueue, uint32_t id) {
-        std::unique_ptr<SimpleTaskResult> result = std::make_unique<SimpleTaskResult>();
-        result->id = id;
-        resultQueue->AddResult(std::move(result));
+// A thread-safe queue that stores the task results.
+class ConcurrentTaskResultQueue : public NonCopyable {
+  public:
+    void AddResult(std::unique_ptr<SimpleTaskResult> result) {
+        std::lock_guard<std::mutex> lock(mMutex);
+        mTaskResults.push_back(std::move(result));
     }
 
+    std::vector<std::unique_ptr<SimpleTaskResult>> GetAllResults() {
+        std::vector<std::unique_ptr<SimpleTaskResult>> outputResults;
+        {
+            std::lock_guard<std::mutex> lock(mMutex);
+            outputResults.swap(mTaskResults);
+        }
+        return outputResults;
+    }
+
+  private:
+    std::mutex mMutex;
+    std::vector<std::unique_ptr<SimpleTaskResult>> mTaskResults;
+};
+
+void DoTask(ConcurrentTaskResultQueue* resultQueue, uint32_t id) {
+    std::unique_ptr<SimpleTaskResult> result = std::make_unique<SimpleTaskResult>();
+    result->id = id;
+    resultQueue->AddResult(std::move(result));
+}
+
 }  // anonymous namespace
 
 class AsyncTaskTest : public testing::Test {};
diff --git a/src/dawn/tests/unittests/BuddyAllocatorTests.cpp b/src/dawn/tests/unittests/BuddyAllocatorTests.cpp
index 2c76322..ad716c3 100644
--- a/src/dawn/tests/unittests/BuddyAllocatorTests.cpp
+++ b/src/dawn/tests/unittests/BuddyAllocatorTests.cpp
@@ -19,313 +19,313 @@
 
 namespace dawn::native {
 
-    constexpr uint64_t BuddyAllocator::kInvalidOffset;
+constexpr uint64_t BuddyAllocator::kInvalidOffset;
 
-    // Verify the buddy allocator with a basic test.
-    TEST(BuddyAllocatorTests, SingleBlock) {
-        // After one 32 byte allocation:
-        //
-        //  Level          --------------------------------
-        //      0       32 |               A              |
-        //                 --------------------------------
-        //
-        constexpr uint64_t maxBlockSize = 32;
+// Verify the buddy allocator with a basic test.
+TEST(BuddyAllocatorTests, SingleBlock) {
+    // After one 32 byte allocation:
+    //
+    //  Level          --------------------------------
+    //      0       32 |               A              |
+    //                 --------------------------------
+    //
+    constexpr uint64_t maxBlockSize = 32;
+    BuddyAllocator allocator(maxBlockSize);
+
+    // Check that we cannot allocate an oversized block.
+    ASSERT_EQ(allocator.Allocate(maxBlockSize * 2), BuddyAllocator::kInvalidOffset);
+
+    // Check that we cannot allocate a zero sized block.
+    ASSERT_EQ(allocator.Allocate(0u), BuddyAllocator::kInvalidOffset);
+
+    // Allocate the block.
+    uint64_t blockOffset = allocator.Allocate(maxBlockSize);
+    ASSERT_EQ(blockOffset, 0u);
+
+    // Check that we are full.
+    ASSERT_EQ(allocator.Allocate(maxBlockSize), BuddyAllocator::kInvalidOffset);
+    ASSERT_EQ(allocator.ComputeTotalNumOfFreeBlocksForTesting(), 0u);
+
+    // Deallocate the block.
+    allocator.Deallocate(blockOffset);
+    ASSERT_EQ(allocator.ComputeTotalNumOfFreeBlocksForTesting(), 1u);
+}
+
+// Verify that multiple allocations succeed using a buddy allocator.
+TEST(BuddyAllocatorTests, MultipleBlocks) {
+    // Fill every level in the allocator (order-n = 2^n)
+    const uint64_t maxBlockSize = (1ull << 16);
+    for (uint64_t order = 1; (1ull << order) <= maxBlockSize; order++) {
         BuddyAllocator allocator(maxBlockSize);
 
-        // Check that we cannot allocate a oversized block.
-        ASSERT_EQ(allocator.Allocate(maxBlockSize * 2), BuddyAllocator::kInvalidOffset);
-
-        // Check that we cannot allocate a zero sized block.
-        ASSERT_EQ(allocator.Allocate(0u), BuddyAllocator::kInvalidOffset);
-
-        // Allocate the block.
-        uint64_t blockOffset = allocator.Allocate(maxBlockSize);
-        ASSERT_EQ(blockOffset, 0u);
-
-        // Check that we are full.
-        ASSERT_EQ(allocator.Allocate(maxBlockSize), BuddyAllocator::kInvalidOffset);
-        ASSERT_EQ(allocator.ComputeTotalNumOfFreeBlocksForTesting(), 0u);
-
-        // Deallocate the block.
-        allocator.Deallocate(blockOffset);
-        ASSERT_EQ(allocator.ComputeTotalNumOfFreeBlocksForTesting(), 1u);
-    }
-
-    // Verify multiple allocations succeeds using a buddy allocator.
-    TEST(BuddyAllocatorTests, MultipleBlocks) {
-        // Fill every level in the allocator (order-n = 2^n)
-        const uint64_t maxBlockSize = (1ull << 16);
-        for (uint64_t order = 1; (1ull << order) <= maxBlockSize; order++) {
-            BuddyAllocator allocator(maxBlockSize);
-
-            uint64_t blockSize = (1ull << order);
-            for (uint32_t blocki = 0; blocki < (maxBlockSize / blockSize); blocki++) {
-                ASSERT_EQ(allocator.Allocate(blockSize), blockSize * blocki);
-            }
+        uint64_t blockSize = (1ull << order);
+        for (uint32_t blocki = 0; blocki < (maxBlockSize / blockSize); blocki++) {
+            ASSERT_EQ(allocator.Allocate(blockSize), blockSize * blocki);
         }
     }
+}
 
-    // Verify that a single allocation succeeds using a buddy allocator.
-    TEST(BuddyAllocatorTests, SingleSplitBlock) {
-        //  After one 8 byte allocation:
-        //
-        //  Level          --------------------------------
-        //      0       32 |               S              |
-        //                 --------------------------------
-        //      1       16 |       S       |       F      |        S - split
-        //                 --------------------------------        F - free
-        //      2       8  |   A   |   F   |       |      |        A - allocated
-        //                 --------------------------------
-        //
-        constexpr uint64_t maxBlockSize = 32;
-        BuddyAllocator allocator(maxBlockSize);
+// Verify that a single allocation succeeds using a buddy allocator.
+TEST(BuddyAllocatorTests, SingleSplitBlock) {
+    //  After one 8 byte allocation:
+    //
+    //  Level          --------------------------------
+    //      0       32 |               S              |
+    //                 --------------------------------
+    //      1       16 |       S       |       F      |        S - split
+    //                 --------------------------------        F - free
+    //      2       8  |   A   |   F   |       |      |        A - allocated
+    //                 --------------------------------
+    //
+    constexpr uint64_t maxBlockSize = 32;
+    BuddyAllocator allocator(maxBlockSize);
 
-        // Allocate block (splits two blocks).
-        uint64_t blockOffset = allocator.Allocate(8);
-        ASSERT_EQ(blockOffset, 0u);
-        ASSERT_EQ(allocator.ComputeTotalNumOfFreeBlocksForTesting(), 2u);
+    // Allocate block (splits two blocks).
+    uint64_t blockOffset = allocator.Allocate(8);
+    ASSERT_EQ(blockOffset, 0u);
+    ASSERT_EQ(allocator.ComputeTotalNumOfFreeBlocksForTesting(), 2u);
 
-        // Deallocate block (merges two blocks).
-        allocator.Deallocate(blockOffset);
-        ASSERT_EQ(allocator.ComputeTotalNumOfFreeBlocksForTesting(), 1u);
+    // Deallocate block (merges two blocks).
+    allocator.Deallocate(blockOffset);
+    ASSERT_EQ(allocator.ComputeTotalNumOfFreeBlocksForTesting(), 1u);
 
-        // Check that we cannot allocate a block that is oversized.
-        ASSERT_EQ(allocator.Allocate(maxBlockSize * 2), BuddyAllocator::kInvalidOffset);
+    // Check that we cannot allocate a block that is oversized.
+    ASSERT_EQ(allocator.Allocate(maxBlockSize * 2), BuddyAllocator::kInvalidOffset);
 
-        // Re-allocate the largest block allowed after merging.
-        blockOffset = allocator.Allocate(maxBlockSize);
-        ASSERT_EQ(blockOffset, 0u);
+    // Re-allocate the largest block allowed after merging.
+    blockOffset = allocator.Allocate(maxBlockSize);
+    ASSERT_EQ(blockOffset, 0u);
 
-        allocator.Deallocate(blockOffset);
-        ASSERT_EQ(allocator.ComputeTotalNumOfFreeBlocksForTesting(), 1u);
+    allocator.Deallocate(blockOffset);
+    ASSERT_EQ(allocator.ComputeTotalNumOfFreeBlocksForTesting(), 1u);
+}
+
+// Verify that multiple allocated blocks can be removed from the free-list.
+TEST(BuddyAllocatorTests, MultipleSplitBlocks) {
+    //  After four 16 byte allocations:
+    //
+    //  Level          --------------------------------
+    //      0       32 |               S              |
+    //                 --------------------------------
+    //      1       16 |       S       |       S      |        S - split
+    //                 --------------------------------        F - free
+    //      2       8  |   Aa  |   Ab  |  Ac  |   Ad  |        A - allocated
+    //                 --------------------------------
+    //
+    constexpr uint64_t maxBlockSize = 32;
+    BuddyAllocator allocator(maxBlockSize);
+
+    // Populates the free-list with four blocks at Level2.
+
+    // Allocate "a" block (two splits).
+    constexpr uint64_t blockSizeInBytes = 8;
+    uint64_t blockOffsetA = allocator.Allocate(blockSizeInBytes);
+    ASSERT_EQ(blockOffsetA, 0u);
+    ASSERT_EQ(allocator.ComputeTotalNumOfFreeBlocksForTesting(), 2u);
+
+    // Allocate "b" block.
+    uint64_t blockOffsetB = allocator.Allocate(blockSizeInBytes);
+    ASSERT_EQ(blockOffsetB, blockSizeInBytes);
+    ASSERT_EQ(allocator.ComputeTotalNumOfFreeBlocksForTesting(), 1u);
+
+    // Allocate "c" block (three splits).
+    uint64_t blockOffsetC = allocator.Allocate(blockSizeInBytes);
+    ASSERT_EQ(blockOffsetC, blockOffsetB + blockSizeInBytes);
+    ASSERT_EQ(allocator.ComputeTotalNumOfFreeBlocksForTesting(), 1u);
+
+    // Allocate "d" block.
+    uint64_t blockOffsetD = allocator.Allocate(blockSizeInBytes);
+    ASSERT_EQ(blockOffsetD, blockOffsetC + blockSizeInBytes);
+    ASSERT_EQ(allocator.ComputeTotalNumOfFreeBlocksForTesting(), 0u);
+
+    // Deallocate "d" block.
+    // FreeList[Level2] = [BlockD] -> x
+    allocator.Deallocate(blockOffsetD);
+    ASSERT_EQ(allocator.ComputeTotalNumOfFreeBlocksForTesting(), 1u);
+
+    // Deallocate "b" block.
+    // FreeList[Level2] = [BlockB] -> [BlockD] -> x
+    allocator.Deallocate(blockOffsetB);
+    ASSERT_EQ(allocator.ComputeTotalNumOfFreeBlocksForTesting(), 2u);
+
+    // Deallocate "c" block (one merges).
+    // FreeList[Level1] = [BlockCD] -> x
+    // FreeList[Level2] = [BlockB] -> x
+    allocator.Deallocate(blockOffsetC);
+    ASSERT_EQ(allocator.ComputeTotalNumOfFreeBlocksForTesting(), 2u);
+
+    // Deallocate "a" block (two merges).
+    // FreeList[Level0] = [BlockABCD] -> x
+    allocator.Deallocate(blockOffsetA);
+    ASSERT_EQ(allocator.ComputeTotalNumOfFreeBlocksForTesting(), 1u);
+}
+
+// Verify the buddy allocator can handle allocations of various sizes.
+TEST(BuddyAllocatorTests, MultipleSplitBlockIncreasingSize) {
+    //  After four Level4-to-Level1 byte then one L4 block allocations:
+    //
+    //  Level          -----------------------------------------------------------------
+    //      0      512 |                               S                               |
+    //                 -----------------------------------------------------------------
+    //      1      256 |               S               |               A               |
+    //                 -----------------------------------------------------------------
+    //      2      128 |       S       |       A       |               |               |
+    //                 -----------------------------------------------------------------
+    //      3       64 |   S   |   A   |       |       |       |       |       |       |
+    //                 -----------------------------------------------------------------
+    //      4       32 | A | F |   |   |   |   |   |   |   |   |   |   |   |   |   |   |
+    //                 -----------------------------------------------------------------
+    //
+    constexpr uint64_t maxBlockSize = 512;
+    BuddyAllocator allocator(maxBlockSize);
+
+    ASSERT_EQ(allocator.Allocate(32), 0ull);
+    ASSERT_EQ(allocator.Allocate(64), 64ull);
+    ASSERT_EQ(allocator.Allocate(128), 128ull);
+    ASSERT_EQ(allocator.Allocate(256), 256ull);
+
+    ASSERT_EQ(allocator.ComputeTotalNumOfFreeBlocksForTesting(), 1u);
+
+    // Fill in the last free block.
+    ASSERT_EQ(allocator.Allocate(32), 32ull);
+
+    ASSERT_EQ(allocator.ComputeTotalNumOfFreeBlocksForTesting(), 0u);
+
+    // Check if we're full.
+    ASSERT_EQ(allocator.Allocate(32), BuddyAllocator::kInvalidOffset);
+}
+
+// Verify that very small allocations using a larger allocator work correctly.
+TEST(BuddyAllocatorTests, MultipleSplitBlocksVariableSizes) {
+    //  After allocating four pairs of one 64 byte block and one 32 byte block.
+    //
+    //  Level          -----------------------------------------------------------------
+    //      0      512 |                               S                               |
+    //                 -----------------------------------------------------------------
+    //      1      256 |               S               |               S               |
+    //                 -----------------------------------------------------------------
+    //      2      128 |       S       |       S       |       S       |       F       |
+    //                 -----------------------------------------------------------------
+    //      3       64 |   A   |   S   |   A   |   A   |   S   |   A   |       |       |
+    //                 -----------------------------------------------------------------
+    //      4       32 |   |   | A | A |   |   |   |   | A | A |   |   |   |   |   |   |
+    //                 -----------------------------------------------------------------
+    //
+    constexpr uint64_t maxBlockSize = 512;
+    BuddyAllocator allocator(maxBlockSize);
+
+    ASSERT_EQ(allocator.Allocate(64), 0ull);
+    ASSERT_EQ(allocator.Allocate(32), 64ull);
+
+    ASSERT_EQ(allocator.Allocate(64), 128ull);
+    ASSERT_EQ(allocator.Allocate(32), 96ull);
+
+    ASSERT_EQ(allocator.Allocate(64), 192ull);
+    ASSERT_EQ(allocator.Allocate(32), 256ull);
+
+    ASSERT_EQ(allocator.Allocate(64), 320ull);
+    ASSERT_EQ(allocator.Allocate(32), 288ull);
+
+    ASSERT_EQ(allocator.ComputeTotalNumOfFreeBlocksForTesting(), 1u);
+}
+
+// Verify the buddy allocator can deal with bad fragmentation.
+TEST(BuddyAllocatorTests, MultipleSplitBlocksInterleaved) {
+    //  Allocate every leaf then de-allocate every other of those allocations.
+    //
+    //  Level          -----------------------------------------------------------------
+    //      0      512 |                               S                               |
+    //                 -----------------------------------------------------------------
+    //      1      256 |               S               |               S               |
+    //                 -----------------------------------------------------------------
+    //      2      128 |       S       |       S       |        S       |        S     |
+    //                 -----------------------------------------------------------------
+    //      3       64 |   S   |   S   |   S   |   S   |   S   |   S   |   S   |   S   |
+    //                 -----------------------------------------------------------------
+    //      4       32 | A | F | A | F | A | F | A | F | A | F | A | F | A | F | A | F |
+    //                 -----------------------------------------------------------------
+    //
+    constexpr uint64_t maxBlockSize = 512;
+    BuddyAllocator allocator(maxBlockSize);
+
+    // Allocate leaf blocks
+    constexpr uint64_t minBlockSizeInBytes = 32;
+    std::vector<uint64_t> blockOffsets;
+    for (uint64_t i = 0; i < maxBlockSize / minBlockSizeInBytes; i++) {
+        blockOffsets.push_back(allocator.Allocate(minBlockSizeInBytes));
     }
 
-    // Verify that a multiple allocated blocks can be removed in the free-list.
-    TEST(BuddyAllocatorTests, MultipleSplitBlocks) {
-        //  After four 16 byte allocations:
-        //
-        //  Level          --------------------------------
-        //      0       32 |               S              |
-        //                 --------------------------------
-        //      1       16 |       S       |       S      |        S - split
-        //                 --------------------------------        F - free
-        //      2       8  |   Aa  |   Ab  |  Ac  |   Ad  |        A - allocated
-        //                 --------------------------------
-        //
-        constexpr uint64_t maxBlockSize = 32;
-        BuddyAllocator allocator(maxBlockSize);
-
-        // Populates the free-list with four blocks at Level2.
-
-        // Allocate "a" block (two splits).
-        constexpr uint64_t blockSizeInBytes = 8;
-        uint64_t blockOffsetA = allocator.Allocate(blockSizeInBytes);
-        ASSERT_EQ(blockOffsetA, 0u);
-        ASSERT_EQ(allocator.ComputeTotalNumOfFreeBlocksForTesting(), 2u);
-
-        // Allocate "b" block.
-        uint64_t blockOffsetB = allocator.Allocate(blockSizeInBytes);
-        ASSERT_EQ(blockOffsetB, blockSizeInBytes);
-        ASSERT_EQ(allocator.ComputeTotalNumOfFreeBlocksForTesting(), 1u);
-
-        // Allocate "c" block (three splits).
-        uint64_t blockOffsetC = allocator.Allocate(blockSizeInBytes);
-        ASSERT_EQ(blockOffsetC, blockOffsetB + blockSizeInBytes);
-        ASSERT_EQ(allocator.ComputeTotalNumOfFreeBlocksForTesting(), 1u);
-
-        // Allocate "d" block.
-        uint64_t blockOffsetD = allocator.Allocate(blockSizeInBytes);
-        ASSERT_EQ(blockOffsetD, blockOffsetC + blockSizeInBytes);
-        ASSERT_EQ(allocator.ComputeTotalNumOfFreeBlocksForTesting(), 0u);
-
-        // Deallocate "d" block.
-        // FreeList[Level2] = [BlockD] -> x
-        allocator.Deallocate(blockOffsetD);
-        ASSERT_EQ(allocator.ComputeTotalNumOfFreeBlocksForTesting(), 1u);
-
-        // Deallocate "b" block.
-        // FreeList[Level2] = [BlockB] -> [BlockD] -> x
-        allocator.Deallocate(blockOffsetB);
-        ASSERT_EQ(allocator.ComputeTotalNumOfFreeBlocksForTesting(), 2u);
-
-        // Deallocate "c" block (one merges).
-        // FreeList[Level1] = [BlockCD] -> x
-        // FreeList[Level2] = [BlockB] -> x
-        allocator.Deallocate(blockOffsetC);
-        ASSERT_EQ(allocator.ComputeTotalNumOfFreeBlocksForTesting(), 2u);
-
-        // Deallocate "a" block (two merges).
-        // FreeList[Level0] = [BlockABCD] -> x
-        allocator.Deallocate(blockOffsetA);
-        ASSERT_EQ(allocator.ComputeTotalNumOfFreeBlocksForTesting(), 1u);
+    // Free every other leaf block.
+    for (size_t count = 1; count < blockOffsets.size(); count += 2) {
+        allocator.Deallocate(blockOffsets[count]);
     }
 
-    // Verify the buddy allocator can handle allocations of various sizes.
-    TEST(BuddyAllocatorTests, MultipleSplitBlockIncreasingSize) {
-        //  After four Level4-to-Level1 byte then one L4 block allocations:
-        //
-        //  Level          -----------------------------------------------------------------
-        //      0      512 |                               S                               |
-        //                 -----------------------------------------------------------------
-        //      1      256 |               S               |               A               |
-        //                 -----------------------------------------------------------------
-        //      2      128 |       S       |       A       |               |               |
-        //                 -----------------------------------------------------------------
-        //      3       64 |   S   |   A   |       |       |       |       |       |       |
-        //                 -----------------------------------------------------------------
-        //      4       32 | A | F |   |   |   |   |   |   |   |   |   |   |   |   |   |   |
-        //                 -----------------------------------------------------------------
-        //
-        constexpr uint64_t maxBlockSize = 512;
-        BuddyAllocator allocator(maxBlockSize);
+    ASSERT_EQ(allocator.ComputeTotalNumOfFreeBlocksForTesting(), 8u);
+}
 
-        ASSERT_EQ(allocator.Allocate(32), 0ull);
-        ASSERT_EQ(allocator.Allocate(64), 64ull);
-        ASSERT_EQ(allocator.Allocate(128), 128ull);
-        ASSERT_EQ(allocator.Allocate(256), 256ull);
+// Verify the buddy allocator can deal with multiple allocations with mixed alignments.
+TEST(BuddyAllocatorTests, SameSizeVariousAlignment) {
+    //  After two 8 byte allocations with 16 byte alignment then one 8 byte allocation with 8
+    //  byte alignment.
+    //
+    //  Level          --------------------------------
+    //      0       32 |               S              |
+    //                 --------------------------------
+    //      1       16 |       S       |       S      |       S - split
+    //                 --------------------------------       F - free
+    //      2       8  |   Aa  |   F   |  Ab   |  Ac  |       A - allocated
+    //                 --------------------------------
+    //
+    BuddyAllocator allocator(32);
 
-        ASSERT_EQ(allocator.ComputeTotalNumOfFreeBlocksForTesting(), 1u);
+    // Allocate Aa (two splits).
+    ASSERT_EQ(allocator.Allocate(8, 16), 0u);
+    ASSERT_EQ(allocator.ComputeTotalNumOfFreeBlocksForTesting(), 2u);
 
-        // Fill in the last free block.
-        ASSERT_EQ(allocator.Allocate(32), 32ull);
+    // Allocate Ab (skip Aa buddy due to alignment and perform another split).
+    ASSERT_EQ(allocator.Allocate(8, 16), 16u);
 
-        ASSERT_EQ(allocator.ComputeTotalNumOfFreeBlocksForTesting(), 0u);
+    ASSERT_EQ(allocator.ComputeTotalNumOfFreeBlocksForTesting(), 2u);
 
-        // Check if we're full.
-        ASSERT_EQ(allocator.Allocate(32), BuddyAllocator::kInvalidOffset);
-    }
+    // Check that we cannot fit another.
+    ASSERT_EQ(allocator.Allocate(8, 16), BuddyAllocator::kInvalidOffset);
 
-    // Verify very small allocations using a larger allocator works correctly.
-    TEST(BuddyAllocatorTests, MultipleSplitBlocksVariableSizes) {
-        //  After allocating four pairs of one 64 byte block and one 32 byte block.
-        //
-        //  Level          -----------------------------------------------------------------
-        //      0      512 |                               S                               |
-        //                 -----------------------------------------------------------------
-        //      1      256 |               S               |               S               |
-        //                 -----------------------------------------------------------------
-        //      2      128 |       S       |       S       |       S       |       F       |
-        //                 -----------------------------------------------------------------
-        //      3       64 |   A   |   S   |   A   |   A   |   S   |   A   |       |       |
-        //                 -----------------------------------------------------------------
-        //      4       32 |   |   | A | A |   |   |   |   | A | A |   |   |   |   |   |   |
-        //                 -----------------------------------------------------------------
-        //
-        constexpr uint64_t maxBlockSize = 512;
-        BuddyAllocator allocator(maxBlockSize);
+    // Allocate Ac (zero splits and Ab's buddy is now the first free block).
+    ASSERT_EQ(allocator.Allocate(8, 8), 24u);
 
-        ASSERT_EQ(allocator.Allocate(64), 0ull);
-        ASSERT_EQ(allocator.Allocate(32), 64ull);
+    ASSERT_EQ(allocator.ComputeTotalNumOfFreeBlocksForTesting(), 1u);
+}
 
-        ASSERT_EQ(allocator.Allocate(64), 128ull);
-        ASSERT_EQ(allocator.Allocate(32), 96ull);
+// Verify the buddy allocator can deal with multiple allocations with equal alignments.
+TEST(BuddyAllocatorTests, VariousSizeSameAlignment) {
+    //  After two 8 byte allocations with 4 byte alignment then one 16 byte allocation with 4
+    //  byte alignment.
+    //
+    //  Level          --------------------------------
+    //      0       32 |               S              |
+    //                 --------------------------------
+    //      1       16 |       S       |       Ac     |       S - split
+    //                 --------------------------------       F - free
+    //      2       8  |   Aa  |   Ab  |              |       A - allocated
+    //                 --------------------------------
+    //
+    constexpr uint64_t maxBlockSize = 32;
+    constexpr uint64_t alignment = 4;
+    BuddyAllocator allocator(maxBlockSize);
 
-        ASSERT_EQ(allocator.Allocate(64), 192ull);
-        ASSERT_EQ(allocator.Allocate(32), 256ull);
+    // Allocate block Aa (two splits)
+    ASSERT_EQ(allocator.Allocate(8, alignment), 0u);
+    ASSERT_EQ(allocator.ComputeTotalNumOfFreeBlocksForTesting(), 2u);
 
-        ASSERT_EQ(allocator.Allocate(64), 320ull);
-        ASSERT_EQ(allocator.Allocate(32), 288ull);
+    // Allocate block Ab (Aa's buddy)
+    ASSERT_EQ(allocator.Allocate(8, alignment), 8u);
 
-        ASSERT_EQ(allocator.ComputeTotalNumOfFreeBlocksForTesting(), 1u);
-    }
+    ASSERT_EQ(allocator.ComputeTotalNumOfFreeBlocksForTesting(), 1u);
 
-    // Verify the buddy allocator can deal with bad fragmentation.
-    TEST(BuddyAllocatorTests, MultipleSplitBlocksInterleaved) {
-        //  Allocate every leaf then de-allocate every other of those allocations.
-        //
-        //  Level          -----------------------------------------------------------------
-        //      0      512 |                               S                               |
-        //                 -----------------------------------------------------------------
-        //      1      256 |               S               |               S               |
-        //                 -----------------------------------------------------------------
-        //      2      128 |       S       |       S       |        S       |        S     |
-        //                 -----------------------------------------------------------------
-        //      3       64 |   S   |   S   |   S   |   S   |   S   |   S   |   S   |   S   |
-        //                 -----------------------------------------------------------------
-        //      4       32 | A | F | A | F | A | F | A | F | A | F | A | F | A | F | A | F |
-        //                 -----------------------------------------------------------------
-        //
-        constexpr uint64_t maxBlockSize = 512;
-        BuddyAllocator allocator(maxBlockSize);
+    // Check that we can still allocate Ac.
+    ASSERT_EQ(allocator.Allocate(16, alignment), 16ull);
 
-        // Allocate leaf blocks
-        constexpr uint64_t minBlockSizeInBytes = 32;
-        std::vector<uint64_t> blockOffsets;
-        for (uint64_t i = 0; i < maxBlockSize / minBlockSizeInBytes; i++) {
-            blockOffsets.push_back(allocator.Allocate(minBlockSizeInBytes));
-        }
-
-        // Free every other leaf block.
-        for (size_t count = 1; count < blockOffsets.size(); count += 2) {
-            allocator.Deallocate(blockOffsets[count]);
-        }
-
-        ASSERT_EQ(allocator.ComputeTotalNumOfFreeBlocksForTesting(), 8u);
-    }
-
-    // Verify the buddy allocator can deal with multiple allocations with mixed alignments.
-    TEST(BuddyAllocatorTests, SameSizeVariousAlignment) {
-        //  After two 8 byte allocations with 16 byte alignment then one 8 byte allocation with 8
-        //  byte alignment.
-        //
-        //  Level          --------------------------------
-        //      0       32 |               S              |
-        //                 --------------------------------
-        //      1       16 |       S       |       S      |       S - split
-        //                 --------------------------------       F - free
-        //      2       8  |   Aa  |   F   |  Ab   |  Ac  |       A - allocated
-        //                 --------------------------------
-        //
-        BuddyAllocator allocator(32);
-
-        // Allocate Aa (two splits).
-        ASSERT_EQ(allocator.Allocate(8, 16), 0u);
-        ASSERT_EQ(allocator.ComputeTotalNumOfFreeBlocksForTesting(), 2u);
-
-        // Allocate Ab (skip Aa buddy due to alignment and perform another split).
-        ASSERT_EQ(allocator.Allocate(8, 16), 16u);
-
-        ASSERT_EQ(allocator.ComputeTotalNumOfFreeBlocksForTesting(), 2u);
-
-        // Check that we cannot fit another.
-        ASSERT_EQ(allocator.Allocate(8, 16), BuddyAllocator::kInvalidOffset);
-
-        // Allocate Ac (zero splits and Ab's buddy is now the first free block).
-        ASSERT_EQ(allocator.Allocate(8, 8), 24u);
-
-        ASSERT_EQ(allocator.ComputeTotalNumOfFreeBlocksForTesting(), 1u);
-    }
-
-    // Verify the buddy allocator can deal with multiple allocations with equal alignments.
-    TEST(BuddyAllocatorTests, VariousSizeSameAlignment) {
-        //  After two 8 byte allocations with 4 byte alignment then one 16 byte allocation with 4
-        //  byte alignment.
-        //
-        //  Level          --------------------------------
-        //      0       32 |               S              |
-        //                 --------------------------------
-        //      1       16 |       S       |       Ac     |       S - split
-        //                 --------------------------------       F - free
-        //      2       8  |   Aa  |   Ab  |              |       A - allocated
-        //                 --------------------------------
-        //
-        constexpr uint64_t maxBlockSize = 32;
-        constexpr uint64_t alignment = 4;
-        BuddyAllocator allocator(maxBlockSize);
-
-        // Allocate block Aa (two splits)
-        ASSERT_EQ(allocator.Allocate(8, alignment), 0u);
-        ASSERT_EQ(allocator.ComputeTotalNumOfFreeBlocksForTesting(), 2u);
-
-        // Allocate block Ab (Aa's buddy)
-        ASSERT_EQ(allocator.Allocate(8, alignment), 8u);
-
-        ASSERT_EQ(allocator.ComputeTotalNumOfFreeBlocksForTesting(), 1u);
-
-        // Check that we can still allocate Ac.
-        ASSERT_EQ(allocator.Allocate(16, alignment), 16ull);
-
-        ASSERT_EQ(allocator.ComputeTotalNumOfFreeBlocksForTesting(), 0u);
-    }
+    ASSERT_EQ(allocator.ComputeTotalNumOfFreeBlocksForTesting(), 0u);
+}
 
 }  // namespace dawn::native
diff --git a/src/dawn/tests/unittests/BuddyMemoryAllocatorTests.cpp b/src/dawn/tests/unittests/BuddyMemoryAllocatorTests.cpp
index c70af25..86ed75b 100644
--- a/src/dawn/tests/unittests/BuddyMemoryAllocatorTests.cpp
+++ b/src/dawn/tests/unittests/BuddyMemoryAllocatorTests.cpp
@@ -17,448 +17,442 @@
 #include <utility>
 #include <vector>
 
-#include "gtest/gtest.h"
 #include "dawn/native/BuddyMemoryAllocator.h"
 #include "dawn/native/PooledResourceMemoryAllocator.h"
 #include "dawn/native/ResourceHeapAllocator.h"
+#include "gtest/gtest.h"
 
 namespace dawn::native {
 
-    class PlaceholderResourceHeapAllocator : public ResourceHeapAllocator {
-      public:
-        ResultOrError<std::unique_ptr<ResourceHeapBase>> AllocateResourceHeap(
-            uint64_t size) override {
-            return std::make_unique<ResourceHeapBase>();
-        }
-        void DeallocateResourceHeap(std::unique_ptr<ResourceHeapBase> allocation) override {
-        }
-    };
+class PlaceholderResourceHeapAllocator : public ResourceHeapAllocator {
+  public:
+    ResultOrError<std::unique_ptr<ResourceHeapBase>> AllocateResourceHeap(uint64_t size) override {
+        return std::make_unique<ResourceHeapBase>();
+    }
+    void DeallocateResourceHeap(std::unique_ptr<ResourceHeapBase> allocation) override {}
+};
 
-    class PlaceholderBuddyResourceAllocator {
-      public:
-        PlaceholderBuddyResourceAllocator(uint64_t maxBlockSize, uint64_t memorySize)
-            : mAllocator(maxBlockSize, memorySize, &mHeapAllocator) {
-        }
+class PlaceholderBuddyResourceAllocator {
+  public:
+    PlaceholderBuddyResourceAllocator(uint64_t maxBlockSize, uint64_t memorySize)
+        : mAllocator(maxBlockSize, memorySize, &mHeapAllocator) {}
 
-        PlaceholderBuddyResourceAllocator(uint64_t maxBlockSize,
-                                          uint64_t memorySize,
-                                          ResourceHeapAllocator* heapAllocator)
-            : mAllocator(maxBlockSize, memorySize, heapAllocator) {
-        }
+    PlaceholderBuddyResourceAllocator(uint64_t maxBlockSize,
+                                      uint64_t memorySize,
+                                      ResourceHeapAllocator* heapAllocator)
+        : mAllocator(maxBlockSize, memorySize, heapAllocator) {}
 
-        ResourceMemoryAllocation Allocate(uint64_t allocationSize, uint64_t alignment = 1) {
-            ResultOrError<ResourceMemoryAllocation> result =
-                mAllocator.Allocate(allocationSize, alignment);
-            return (result.IsSuccess()) ? result.AcquireSuccess() : ResourceMemoryAllocation{};
-        }
-
-        void Deallocate(ResourceMemoryAllocation& allocation) {
-            mAllocator.Deallocate(allocation);
-        }
-
-        uint64_t ComputeTotalNumOfHeapsForTesting() const {
-            return mAllocator.ComputeTotalNumOfHeapsForTesting();
-        }
-
-      private:
-        PlaceholderResourceHeapAllocator mHeapAllocator;
-        BuddyMemoryAllocator mAllocator;
-    };
-
-    // Verify a single resource allocation in a single heap.
-    TEST(BuddyMemoryAllocatorTests, SingleHeap) {
-        // After one 128 byte resource allocation:
-        //
-        // max block size -> ---------------------------
-        //                   |          A1/H0          |       Hi - Heap at index i
-        // max heap size  -> ---------------------------       An - Resource allocation n
-        //
-        constexpr uint64_t heapSize = 128;
-        constexpr uint64_t maxBlockSize = heapSize;
-        PlaceholderBuddyResourceAllocator allocator(maxBlockSize, heapSize);
-
-        // Cannot allocate greater than heap size.
-        ResourceMemoryAllocation invalidAllocation = allocator.Allocate(heapSize * 2);
-        ASSERT_EQ(invalidAllocation.GetInfo().mMethod, AllocationMethod::kInvalid);
-
-        // Allocate one 128 byte allocation (same size as heap).
-        ResourceMemoryAllocation allocation1 = allocator.Allocate(128);
-        ASSERT_EQ(allocation1.GetInfo().mBlockOffset, 0u);
-        ASSERT_EQ(allocation1.GetInfo().mMethod, AllocationMethod::kSubAllocated);
-
-        ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 1u);
-
-        // Cannot allocate when allocator is full.
-        invalidAllocation = allocator.Allocate(128);
-        ASSERT_EQ(invalidAllocation.GetInfo().mMethod, AllocationMethod::kInvalid);
-
-        allocator.Deallocate(allocation1);
-        ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 0u);
+    ResourceMemoryAllocation Allocate(uint64_t allocationSize, uint64_t alignment = 1) {
+        ResultOrError<ResourceMemoryAllocation> result =
+            mAllocator.Allocate(allocationSize, alignment);
+        return (result.IsSuccess()) ? result.AcquireSuccess() : ResourceMemoryAllocation{};
     }
 
-    // Verify that multiple allocation are created in separate heaps.
-    TEST(BuddyMemoryAllocatorTests, MultipleHeaps) {
-        // After two 128 byte resource allocations:
-        //
-        // max block size -> ---------------------------
-        //                   |                         |       Hi - Heap at index i
-        // max heap size  -> ---------------------------       An - Resource allocation n
-        //                   |   A1/H0    |    A2/H1   |
-        //                   ---------------------------
-        //
-        constexpr uint64_t maxBlockSize = 256;
-        constexpr uint64_t heapSize = 128;
-        PlaceholderBuddyResourceAllocator allocator(maxBlockSize, heapSize);
+    void Deallocate(ResourceMemoryAllocation& allocation) { mAllocator.Deallocate(allocation); }
 
-        // Cannot allocate greater than heap size.
-        ResourceMemoryAllocation invalidAllocation = allocator.Allocate(heapSize * 2);
-        ASSERT_EQ(invalidAllocation.GetInfo().mMethod, AllocationMethod::kInvalid);
-
-        // Cannot allocate greater than max block size.
-        invalidAllocation = allocator.Allocate(maxBlockSize * 2);
-        ASSERT_EQ(invalidAllocation.GetInfo().mMethod, AllocationMethod::kInvalid);
-
-        // Allocate two 128 byte allocations.
-        ResourceMemoryAllocation allocation1 = allocator.Allocate(heapSize);
-        ASSERT_EQ(allocation1.GetInfo().mBlockOffset, 0u);
-        ASSERT_EQ(allocation1.GetInfo().mMethod, AllocationMethod::kSubAllocated);
-
-        // First allocation creates first heap.
-        ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 1u);
-
-        ResourceMemoryAllocation allocation2 = allocator.Allocate(heapSize);
-        ASSERT_EQ(allocation2.GetInfo().mBlockOffset, heapSize);
-        ASSERT_EQ(allocation2.GetInfo().mMethod, AllocationMethod::kSubAllocated);
-
-        // Second allocation creates second heap.
-        ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 2u);
-        ASSERT_NE(allocation1.GetResourceHeap(), allocation2.GetResourceHeap());
-
-        // Deallocate both allocations
-        allocator.Deallocate(allocation1);
-        ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 1u);  // Released H0
-
-        allocator.Deallocate(allocation2);
-        ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 0u);  // Released H1
+    uint64_t ComputeTotalNumOfHeapsForTesting() const {
+        return mAllocator.ComputeTotalNumOfHeapsForTesting();
     }
 
-    // Verify multiple sub-allocations can re-use heaps.
-    TEST(BuddyMemoryAllocatorTests, MultipleSplitHeaps) {
-        // After two 64 byte allocations with 128 byte heaps.
-        //
-        // max block size -> ---------------------------
-        //                   |                         |       Hi - Heap at index i
-        // max heap size  -> ---------------------------       An - Resource allocation n
-        //                   |     H0     |     H1     |
-        //                   ---------------------------
-        //                   |  A1 |  A2  |  A3 |      |
-        //                   ---------------------------
-        //
-        constexpr uint64_t maxBlockSize = 256;
-        constexpr uint64_t heapSize = 128;
-        PlaceholderBuddyResourceAllocator allocator(maxBlockSize, heapSize);
+  private:
+    PlaceholderResourceHeapAllocator mHeapAllocator;
+    BuddyMemoryAllocator mAllocator;
+};
 
-        // Allocate two 64 byte sub-allocations.
-        ResourceMemoryAllocation allocation1 = allocator.Allocate(heapSize / 2);
-        ASSERT_EQ(allocation1.GetInfo().mBlockOffset, 0u);
-        ASSERT_EQ(allocation1.GetInfo().mMethod, AllocationMethod::kSubAllocated);
+// Verify a single resource allocation in a single heap.
+TEST(BuddyMemoryAllocatorTests, SingleHeap) {
+    // After one 128 byte resource allocation:
+    //
+    // max block size -> ---------------------------
+    //                   |          A1/H0          |       Hi - Heap at index i
+    // max heap size  -> ---------------------------       An - Resource allocation n
+    //
+    constexpr uint64_t heapSize = 128;
+    constexpr uint64_t maxBlockSize = heapSize;
+    PlaceholderBuddyResourceAllocator allocator(maxBlockSize, heapSize);
 
-        // First sub-allocation creates first heap.
-        ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 1u);
+    // Cannot allocate greater than heap size.
+    ResourceMemoryAllocation invalidAllocation = allocator.Allocate(heapSize * 2);
+    ASSERT_EQ(invalidAllocation.GetInfo().mMethod, AllocationMethod::kInvalid);
 
-        ResourceMemoryAllocation allocation2 = allocator.Allocate(heapSize / 2);
-        ASSERT_EQ(allocation2.GetInfo().mBlockOffset, heapSize / 2);
-        ASSERT_EQ(allocation2.GetInfo().mMethod, AllocationMethod::kSubAllocated);
+    // Allocate one 128 byte allocation (same size as heap).
+    ResourceMemoryAllocation allocation1 = allocator.Allocate(128);
+    ASSERT_EQ(allocation1.GetInfo().mBlockOffset, 0u);
+    ASSERT_EQ(allocation1.GetInfo().mMethod, AllocationMethod::kSubAllocated);
 
-        // Second allocation re-uses first heap.
-        ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 1u);
-        ASSERT_EQ(allocation1.GetResourceHeap(), allocation2.GetResourceHeap());
+    ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 1u);
 
-        ResourceMemoryAllocation allocation3 = allocator.Allocate(heapSize / 2);
-        ASSERT_EQ(allocation3.GetInfo().mBlockOffset, heapSize);
-        ASSERT_EQ(allocation3.GetInfo().mMethod, AllocationMethod::kSubAllocated);
+    // Cannot allocate when allocator is full.
+    invalidAllocation = allocator.Allocate(128);
+    ASSERT_EQ(invalidAllocation.GetInfo().mMethod, AllocationMethod::kInvalid);
 
-        // Third allocation creates second heap.
-        ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 2u);
-        ASSERT_NE(allocation1.GetResourceHeap(), allocation3.GetResourceHeap());
+    allocator.Deallocate(allocation1);
+    ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 0u);
+}
 
-        // Deallocate all allocations in reverse order.
-        allocator.Deallocate(allocation1);
-        ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(),
-                  2u);  // A2 pins H0.
+// Verify that multiple allocation are created in separate heaps.
+TEST(BuddyMemoryAllocatorTests, MultipleHeaps) {
+    // After two 128 byte resource allocations:
+    //
+    // max block size -> ---------------------------
+    //                   |                         |       Hi - Heap at index i
+    // max heap size  -> ---------------------------       An - Resource allocation n
+    //                   |   A1/H0    |    A2/H1   |
+    //                   ---------------------------
+    //
+    constexpr uint64_t maxBlockSize = 256;
+    constexpr uint64_t heapSize = 128;
+    PlaceholderBuddyResourceAllocator allocator(maxBlockSize, heapSize);
 
-        allocator.Deallocate(allocation2);
-        ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 1u);  // Released H0
+    // Cannot allocate greater than heap size.
+    ResourceMemoryAllocation invalidAllocation = allocator.Allocate(heapSize * 2);
+    ASSERT_EQ(invalidAllocation.GetInfo().mMethod, AllocationMethod::kInvalid);
 
-        allocator.Deallocate(allocation3);
-        ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 0u);  // Released H1
+    // Cannot allocate greater than max block size.
+    invalidAllocation = allocator.Allocate(maxBlockSize * 2);
+    ASSERT_EQ(invalidAllocation.GetInfo().mMethod, AllocationMethod::kInvalid);
+
+    // Allocate two 128 byte allocations.
+    ResourceMemoryAllocation allocation1 = allocator.Allocate(heapSize);
+    ASSERT_EQ(allocation1.GetInfo().mBlockOffset, 0u);
+    ASSERT_EQ(allocation1.GetInfo().mMethod, AllocationMethod::kSubAllocated);
+
+    // First allocation creates first heap.
+    ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 1u);
+
+    ResourceMemoryAllocation allocation2 = allocator.Allocate(heapSize);
+    ASSERT_EQ(allocation2.GetInfo().mBlockOffset, heapSize);
+    ASSERT_EQ(allocation2.GetInfo().mMethod, AllocationMethod::kSubAllocated);
+
+    // Second allocation creates second heap.
+    ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 2u);
+    ASSERT_NE(allocation1.GetResourceHeap(), allocation2.GetResourceHeap());
+
+    // Deallocate both allocations
+    allocator.Deallocate(allocation1);
+    ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 1u);  // Released H0
+
+    allocator.Deallocate(allocation2);
+    ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 0u);  // Released H1
+}
+
+// Verify multiple sub-allocations can re-use heaps.
+TEST(BuddyMemoryAllocatorTests, MultipleSplitHeaps) {
+    // After two 64 byte allocations with 128 byte heaps.
+    //
+    // max block size -> ---------------------------
+    //                   |                         |       Hi - Heap at index i
+    // max heap size  -> ---------------------------       An - Resource allocation n
+    //                   |     H0     |     H1     |
+    //                   ---------------------------
+    //                   |  A1 |  A2  |  A3 |      |
+    //                   ---------------------------
+    //
+    constexpr uint64_t maxBlockSize = 256;
+    constexpr uint64_t heapSize = 128;
+    PlaceholderBuddyResourceAllocator allocator(maxBlockSize, heapSize);
+
+    // Allocate two 64 byte sub-allocations.
+    ResourceMemoryAllocation allocation1 = allocator.Allocate(heapSize / 2);
+    ASSERT_EQ(allocation1.GetInfo().mBlockOffset, 0u);
+    ASSERT_EQ(allocation1.GetInfo().mMethod, AllocationMethod::kSubAllocated);
+
+    // First sub-allocation creates first heap.
+    ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 1u);
+
+    ResourceMemoryAllocation allocation2 = allocator.Allocate(heapSize / 2);
+    ASSERT_EQ(allocation2.GetInfo().mBlockOffset, heapSize / 2);
+    ASSERT_EQ(allocation2.GetInfo().mMethod, AllocationMethod::kSubAllocated);
+
+    // Second allocation re-uses first heap.
+    ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 1u);
+    ASSERT_EQ(allocation1.GetResourceHeap(), allocation2.GetResourceHeap());
+
+    ResourceMemoryAllocation allocation3 = allocator.Allocate(heapSize / 2);
+    ASSERT_EQ(allocation3.GetInfo().mBlockOffset, heapSize);
+    ASSERT_EQ(allocation3.GetInfo().mMethod, AllocationMethod::kSubAllocated);
+
+    // Third allocation creates second heap.
+    ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 2u);
+    ASSERT_NE(allocation1.GetResourceHeap(), allocation3.GetResourceHeap());
+
+    // Deallocate all allocations in reverse order.
+    allocator.Deallocate(allocation1);
+    ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(),
+              2u);  // A2 pins H0.
+
+    allocator.Deallocate(allocation2);
+    ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 1u);  // Released H0
+
+    allocator.Deallocate(allocation3);
+    ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 0u);  // Released H1
+}
+
+// Verify resource sub-allocation of various sizes over multiple heaps.
+TEST(BuddyMemoryAllocatorTests, MultiplSplitHeapsVariableSizes) {
+    // After three 64 byte allocations and two 128 byte allocations.
+    //
+    // max block size -> -------------------------------------------------------
+    //                   |                                                     |
+    //                   -------------------------------------------------------
+    //                   |                         |                           |
+    // max heap size  -> -------------------------------------------------------
+    //                   |     H0     |    A3/H1   |      H2     |    A5/H3    |
+    //                   -------------------------------------------------------
+    //                   |  A1 |  A2  |            |   A4  |     |             |
+    //                   -------------------------------------------------------
+    //
+    constexpr uint64_t heapSize = 128;
+    constexpr uint64_t maxBlockSize = 512;
+    PlaceholderBuddyResourceAllocator allocator(maxBlockSize, heapSize);
+
+    // Allocate two 64-byte allocations.
+    ResourceMemoryAllocation allocation1 = allocator.Allocate(64);
+    ASSERT_EQ(allocation1.GetInfo().mBlockOffset, 0u);
+    ASSERT_EQ(allocation1.GetOffset(), 0u);
+    ASSERT_EQ(allocation1.GetInfo().mMethod, AllocationMethod::kSubAllocated);
+
+    ResourceMemoryAllocation allocation2 = allocator.Allocate(64);
+    ASSERT_EQ(allocation2.GetInfo().mBlockOffset, 64u);
+    ASSERT_EQ(allocation2.GetOffset(), 64u);
+    ASSERT_EQ(allocation2.GetInfo().mMethod, AllocationMethod::kSubAllocated);
+
+    // A1 and A2 share H0
+    ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 1u);
+    ASSERT_EQ(allocation1.GetResourceHeap(), allocation2.GetResourceHeap());
+
+    ResourceMemoryAllocation allocation3 = allocator.Allocate(128);
+    ASSERT_EQ(allocation3.GetInfo().mBlockOffset, 128u);
+    ASSERT_EQ(allocation3.GetOffset(), 0u);
+    ASSERT_EQ(allocation3.GetInfo().mMethod, AllocationMethod::kSubAllocated);
+
+    // A3 creates and fully occupies a new heap.
+    ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 2u);
+    ASSERT_NE(allocation2.GetResourceHeap(), allocation3.GetResourceHeap());
+
+    ResourceMemoryAllocation allocation4 = allocator.Allocate(64);
+    ASSERT_EQ(allocation4.GetInfo().mBlockOffset, 256u);
+    ASSERT_EQ(allocation4.GetOffset(), 0u);
+    ASSERT_EQ(allocation4.GetInfo().mMethod, AllocationMethod::kSubAllocated);
+
+    ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 3u);
+    ASSERT_NE(allocation3.GetResourceHeap(), allocation4.GetResourceHeap());
+
+    // R5 size forms 64 byte hole after R4.
+    ResourceMemoryAllocation allocation5 = allocator.Allocate(128);
+    ASSERT_EQ(allocation5.GetInfo().mBlockOffset, 384u);
+    ASSERT_EQ(allocation5.GetOffset(), 0u);
+    ASSERT_EQ(allocation5.GetInfo().mMethod, AllocationMethod::kSubAllocated);
+
+    ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 4u);
+    ASSERT_NE(allocation4.GetResourceHeap(), allocation5.GetResourceHeap());
+
+    // Deallocate allocations in staggered order.
+    allocator.Deallocate(allocation1);
+    ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 4u);  // A2 pins H0
+
+    allocator.Deallocate(allocation5);
+    ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 3u);  // Released H3
+
+    allocator.Deallocate(allocation2);
+    ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 2u);  // Released H0
+
+    allocator.Deallocate(allocation4);
+    ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 1u);  // Released H2
+
+    allocator.Deallocate(allocation3);
+    ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 0u);  // Released H1
+}
+
+// Verify resource sub-allocation of same sizes with various alignments.
+TEST(BuddyMemoryAllocatorTests, SameSizeVariousAlignment) {
+    // After three 64 byte and one 128 byte resource allocations.
+    //
+    // max block size -> -------------------------------------------------------
+    //                   |                                                     |
+    //                   -------------------------------------------------------
+    //                   |                         |                           |
+    // max heap size  -> -------------------------------------------------------
+    //                   |     H0     |     H1     |     H2     |              |
+    //                   -------------------------------------------------------
+    //                   |  A1  |     |  A2  |     |  A3  |  A4 |              |
+    //                   -------------------------------------------------------
+    //
+    constexpr uint64_t heapSize = 128;
+    constexpr uint64_t maxBlockSize = 512;
+    PlaceholderBuddyResourceAllocator allocator(maxBlockSize, heapSize);
+
+    ResourceMemoryAllocation allocation1 = allocator.Allocate(64, 128);
+    ASSERT_EQ(allocation1.GetInfo().mBlockOffset, 0u);
+    ASSERT_EQ(allocation1.GetOffset(), 0u);
+    ASSERT_EQ(allocation1.GetInfo().mMethod, AllocationMethod::kSubAllocated);
+
+    ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 1u);
+
+    ResourceMemoryAllocation allocation2 = allocator.Allocate(64, 128);
+    ASSERT_EQ(allocation2.GetInfo().mBlockOffset, 128u);
+    ASSERT_EQ(allocation2.GetOffset(), 0u);
+    ASSERT_EQ(allocation2.GetInfo().mMethod, AllocationMethod::kSubAllocated);
+
+    ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 2u);
+    ASSERT_NE(allocation1.GetResourceHeap(), allocation2.GetResourceHeap());
+
+    ResourceMemoryAllocation allocation3 = allocator.Allocate(64, 128);
+    ASSERT_EQ(allocation3.GetInfo().mBlockOffset, 256u);
+    ASSERT_EQ(allocation3.GetOffset(), 0u);
+    ASSERT_EQ(allocation3.GetInfo().mMethod, AllocationMethod::kSubAllocated);
+
+    ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 3u);
+    ASSERT_NE(allocation2.GetResourceHeap(), allocation3.GetResourceHeap());
+
+    ResourceMemoryAllocation allocation4 = allocator.Allocate(64, 64);
+    ASSERT_EQ(allocation4.GetInfo().mBlockOffset, 320u);
+    ASSERT_EQ(allocation4.GetOffset(), 64u);
+    ASSERT_EQ(allocation4.GetInfo().mMethod, AllocationMethod::kSubAllocated);
+
+    ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 3u);
+    ASSERT_EQ(allocation3.GetResourceHeap(), allocation4.GetResourceHeap());
+}
+
+// Verify resource sub-allocation of various sizes with same alignments.
+TEST(BuddyMemoryAllocatorTests, VariousSizeSameAlignment) {
+    // After two 64 byte and two 128 byte resource allocations:
+    //
+    // max block size -> -------------------------------------------------------
+    //                   |                                                     |
+    //                   -------------------------------------------------------
+    //                   |                         |                           |
+    // max heap size  -> -------------------------------------------------------
+    //                   |     H0     |    A3/H1   |    A4/H2   |              |
+    //                   -------------------------------------------------------
+    //                   |  A1 |  A2  |            |            |              |
+    //                   -------------------------------------------------------
+    //
+    constexpr uint64_t heapSize = 128;
+    constexpr uint64_t maxBlockSize = 512;
+    PlaceholderBuddyResourceAllocator allocator(maxBlockSize, heapSize);
+
+    constexpr uint64_t alignment = 64;
+
+    ResourceMemoryAllocation allocation1 = allocator.Allocate(64, alignment);
+    ASSERT_EQ(allocation1.GetInfo().mBlockOffset, 0u);
+    ASSERT_EQ(allocation1.GetInfo().mMethod, AllocationMethod::kSubAllocated);
+
+    ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 1u);
+
+    ResourceMemoryAllocation allocation2 = allocator.Allocate(64, alignment);
+    ASSERT_EQ(allocation2.GetInfo().mBlockOffset, 64u);
+    ASSERT_EQ(allocation2.GetOffset(), 64u);
+    ASSERT_EQ(allocation2.GetInfo().mMethod, AllocationMethod::kSubAllocated);
+
+    ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 1u);  // Reuses H0
+    ASSERT_EQ(allocation1.GetResourceHeap(), allocation2.GetResourceHeap());
+
+    ResourceMemoryAllocation allocation3 = allocator.Allocate(128, alignment);
+    ASSERT_EQ(allocation3.GetInfo().mBlockOffset, 128u);
+    ASSERT_EQ(allocation3.GetOffset(), 0u);
+    ASSERT_EQ(allocation3.GetInfo().mMethod, AllocationMethod::kSubAllocated);
+
+    ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 2u);
+    ASSERT_NE(allocation2.GetResourceHeap(), allocation3.GetResourceHeap());
+
+    ResourceMemoryAllocation allocation4 = allocator.Allocate(128, alignment);
+    ASSERT_EQ(allocation4.GetInfo().mBlockOffset, 256u);
+    ASSERT_EQ(allocation4.GetOffset(), 0u);
+    ASSERT_EQ(allocation4.GetInfo().mMethod, AllocationMethod::kSubAllocated);
+
+    ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 3u);
+    ASSERT_NE(allocation3.GetResourceHeap(), allocation4.GetResourceHeap());
+}
+
+// Verify allocating a very large resource does not overflow.
+TEST(BuddyMemoryAllocatorTests, AllocationOverflow) {
+    constexpr uint64_t heapSize = 128;
+    constexpr uint64_t maxBlockSize = 512;
+    PlaceholderBuddyResourceAllocator allocator(maxBlockSize, heapSize);
+
+    constexpr uint64_t largeBlock = (1ull << 63) + 1;
+    ResourceMemoryAllocation invalidAllocation = allocator.Allocate(largeBlock);
+    ASSERT_EQ(invalidAllocation.GetInfo().mMethod, AllocationMethod::kInvalid);
+}
+
+// Verify resource heaps will be reused from a pool.
+TEST(BuddyMemoryAllocatorTests, ReuseFreedHeaps) {
+    constexpr uint64_t kHeapSize = 128;
+    constexpr uint64_t kMaxBlockSize = 4096;
+
+    PlaceholderResourceHeapAllocator heapAllocator;
+    PooledResourceMemoryAllocator poolAllocator(&heapAllocator);
+    PlaceholderBuddyResourceAllocator allocator(kMaxBlockSize, kHeapSize, &poolAllocator);
+
+    std::set<ResourceHeapBase*> heaps = {};
+    std::vector<ResourceMemoryAllocation> allocations = {};
+
+    constexpr uint32_t kNumOfAllocations = 100;
+
+    // Allocate |kNumOfAllocations|.
+    for (uint32_t i = 0; i < kNumOfAllocations; i++) {
+        ResourceMemoryAllocation allocation = allocator.Allocate(4);
+        ASSERT_EQ(allocation.GetInfo().mMethod, AllocationMethod::kSubAllocated);
+        heaps.insert(allocation.GetResourceHeap());
+        allocations.push_back(std::move(allocation));
     }
 
-    // Verify resource sub-allocation of various sizes over multiple heaps.
-    TEST(BuddyMemoryAllocatorTests, MultiplSplitHeapsVariableSizes) {
-        // After three 64 byte allocations and two 128 byte allocations.
-        //
-        // max block size -> -------------------------------------------------------
-        //                   |                                                     |
-        //                   -------------------------------------------------------
-        //                   |                         |                           |
-        // max heap size  -> -------------------------------------------------------
-        //                   |     H0     |    A3/H1   |      H2     |    A5/H3    |
-        //                   -------------------------------------------------------
-        //                   |  A1 |  A2  |            |   A4  |     |             |
-        //                   -------------------------------------------------------
-        //
-        constexpr uint64_t heapSize = 128;
-        constexpr uint64_t maxBlockSize = 512;
-        PlaceholderBuddyResourceAllocator allocator(maxBlockSize, heapSize);
+    ASSERT_EQ(poolAllocator.GetPoolSizeForTesting(), 0u);
 
-        // Allocate two 64-byte allocations.
-        ResourceMemoryAllocation allocation1 = allocator.Allocate(64);
-        ASSERT_EQ(allocation1.GetInfo().mBlockOffset, 0u);
-        ASSERT_EQ(allocation1.GetOffset(), 0u);
-        ASSERT_EQ(allocation1.GetInfo().mMethod, AllocationMethod::kSubAllocated);
-
-        ResourceMemoryAllocation allocation2 = allocator.Allocate(64);
-        ASSERT_EQ(allocation2.GetInfo().mBlockOffset, 64u);
-        ASSERT_EQ(allocation2.GetOffset(), 64u);
-        ASSERT_EQ(allocation2.GetInfo().mMethod, AllocationMethod::kSubAllocated);
-
-        // A1 and A2 share H0
-        ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 1u);
-        ASSERT_EQ(allocation1.GetResourceHeap(), allocation2.GetResourceHeap());
-
-        ResourceMemoryAllocation allocation3 = allocator.Allocate(128);
-        ASSERT_EQ(allocation3.GetInfo().mBlockOffset, 128u);
-        ASSERT_EQ(allocation3.GetOffset(), 0u);
-        ASSERT_EQ(allocation3.GetInfo().mMethod, AllocationMethod::kSubAllocated);
-
-        // A3 creates and fully occupies a new heap.
-        ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 2u);
-        ASSERT_NE(allocation2.GetResourceHeap(), allocation3.GetResourceHeap());
-
-        ResourceMemoryAllocation allocation4 = allocator.Allocate(64);
-        ASSERT_EQ(allocation4.GetInfo().mBlockOffset, 256u);
-        ASSERT_EQ(allocation4.GetOffset(), 0u);
-        ASSERT_EQ(allocation4.GetInfo().mMethod, AllocationMethod::kSubAllocated);
-
-        ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 3u);
-        ASSERT_NE(allocation3.GetResourceHeap(), allocation4.GetResourceHeap());
-
-        // R5 size forms 64 byte hole after R4.
-        ResourceMemoryAllocation allocation5 = allocator.Allocate(128);
-        ASSERT_EQ(allocation5.GetInfo().mBlockOffset, 384u);
-        ASSERT_EQ(allocation5.GetOffset(), 0u);
-        ASSERT_EQ(allocation5.GetInfo().mMethod, AllocationMethod::kSubAllocated);
-
-        ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 4u);
-        ASSERT_NE(allocation4.GetResourceHeap(), allocation5.GetResourceHeap());
-
-        // Deallocate allocations in staggered order.
-        allocator.Deallocate(allocation1);
-        ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 4u);  // A2 pins H0
-
-        allocator.Deallocate(allocation5);
-        ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 3u);  // Released H3
-
-        allocator.Deallocate(allocation2);
-        ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 2u);  // Released H0
-
-        allocator.Deallocate(allocation4);
-        ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 1u);  // Released H2
-
-        allocator.Deallocate(allocation3);
-        ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 0u);  // Released H1
+    // Return the allocations to the pool.
+    for (ResourceMemoryAllocation& allocation : allocations) {
+        allocator.Deallocate(allocation);
     }
 
-    // Verify resource sub-allocation of same sizes with various alignments.
-    TEST(BuddyMemoryAllocatorTests, SameSizeVariousAlignment) {
-        // After three 64 byte and one 128 byte resource allocations.
-        //
-        // max block size -> -------------------------------------------------------
-        //                   |                                                     |
-        //                   -------------------------------------------------------
-        //                   |                         |                           |
-        // max heap size  -> -------------------------------------------------------
-        //                   |     H0     |     H1     |     H2     |              |
-        //                   -------------------------------------------------------
-        //                   |  A1  |     |  A2  |     |  A3  |  A4 |              |
-        //                   -------------------------------------------------------
-        //
-        constexpr uint64_t heapSize = 128;
-        constexpr uint64_t maxBlockSize = 512;
-        PlaceholderBuddyResourceAllocator allocator(maxBlockSize, heapSize);
+    ASSERT_EQ(poolAllocator.GetPoolSizeForTesting(), heaps.size());
 
-        ResourceMemoryAllocation allocation1 = allocator.Allocate(64, 128);
-        ASSERT_EQ(allocation1.GetInfo().mBlockOffset, 0u);
-        ASSERT_EQ(allocation1.GetOffset(), 0u);
-        ASSERT_EQ(allocation1.GetInfo().mMethod, AllocationMethod::kSubAllocated);
-
-        ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 1u);
-
-        ResourceMemoryAllocation allocation2 = allocator.Allocate(64, 128);
-        ASSERT_EQ(allocation2.GetInfo().mBlockOffset, 128u);
-        ASSERT_EQ(allocation2.GetOffset(), 0u);
-        ASSERT_EQ(allocation2.GetInfo().mMethod, AllocationMethod::kSubAllocated);
-
-        ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 2u);
-        ASSERT_NE(allocation1.GetResourceHeap(), allocation2.GetResourceHeap());
-
-        ResourceMemoryAllocation allocation3 = allocator.Allocate(64, 128);
-        ASSERT_EQ(allocation3.GetInfo().mBlockOffset, 256u);
-        ASSERT_EQ(allocation3.GetOffset(), 0u);
-        ASSERT_EQ(allocation3.GetInfo().mMethod, AllocationMethod::kSubAllocated);
-
-        ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 3u);
-        ASSERT_NE(allocation2.GetResourceHeap(), allocation3.GetResourceHeap());
-
-        ResourceMemoryAllocation allocation4 = allocator.Allocate(64, 64);
-        ASSERT_EQ(allocation4.GetInfo().mBlockOffset, 320u);
-        ASSERT_EQ(allocation4.GetOffset(), 64u);
-        ASSERT_EQ(allocation4.GetInfo().mMethod, AllocationMethod::kSubAllocated);
-
-        ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 3u);
-        ASSERT_EQ(allocation3.GetResourceHeap(), allocation4.GetResourceHeap());
+    // Allocate again reusing the same heaps.
+    for (uint32_t i = 0; i < kNumOfAllocations; i++) {
+        ResourceMemoryAllocation allocation = allocator.Allocate(4);
+        ASSERT_EQ(allocation.GetInfo().mMethod, AllocationMethod::kSubAllocated);
+        ASSERT_FALSE(heaps.insert(allocation.GetResourceHeap()).second);
     }
 
-    // Verify resource sub-allocation of various sizes with same alignments.
-    TEST(BuddyMemoryAllocatorTests, VariousSizeSameAlignment) {
-        // After two 64 byte and two 128 byte resource allocations:
-        //
-        // max block size -> -------------------------------------------------------
-        //                   |                                                     |
-        //                   -------------------------------------------------------
-        //                   |                         |                           |
-        // max heap size  -> -------------------------------------------------------
-        //                   |     H0     |    A3/H1   |    A4/H2   |              |
-        //                   -------------------------------------------------------
-        //                   |  A1 |  A2  |            |            |              |
-        //                   -------------------------------------------------------
-        //
-        constexpr uint64_t heapSize = 128;
-        constexpr uint64_t maxBlockSize = 512;
-        PlaceholderBuddyResourceAllocator allocator(maxBlockSize, heapSize);
+    ASSERT_EQ(poolAllocator.GetPoolSizeForTesting(), 0u);
+}
 
-        constexpr uint64_t alignment = 64;
+// Verify resource heaps that were reused from a pool can be destroyed.
+TEST(BuddyMemoryAllocatorTests, DestroyHeaps) {
+    constexpr uint64_t kHeapSize = 128;
+    constexpr uint64_t kMaxBlockSize = 4096;
 
-        ResourceMemoryAllocation allocation1 = allocator.Allocate(64, alignment);
-        ASSERT_EQ(allocation1.GetInfo().mBlockOffset, 0u);
-        ASSERT_EQ(allocation1.GetInfo().mMethod, AllocationMethod::kSubAllocated);
+    PlaceholderResourceHeapAllocator heapAllocator;
+    PooledResourceMemoryAllocator poolAllocator(&heapAllocator);
+    PlaceholderBuddyResourceAllocator allocator(kMaxBlockSize, kHeapSize, &poolAllocator);
 
-        ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 1u);
+    std::set<ResourceHeapBase*> heaps = {};
+    std::vector<ResourceMemoryAllocation> allocations = {};
 
-        ResourceMemoryAllocation allocation2 = allocator.Allocate(64, alignment);
-        ASSERT_EQ(allocation2.GetInfo().mBlockOffset, 64u);
-        ASSERT_EQ(allocation2.GetOffset(), 64u);
-        ASSERT_EQ(allocation2.GetInfo().mMethod, AllocationMethod::kSubAllocated);
+    // Count by heap (vs number of allocations) to ensure there are exactly |kNumOfHeaps| worth
+    // of buffers. Otherwise, the heap may be reused if not full.
+    constexpr uint32_t kNumOfHeaps = 10;
 
-        ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 1u);  // Reuses H0
-        ASSERT_EQ(allocation1.GetResourceHeap(), allocation2.GetResourceHeap());
-
-        ResourceMemoryAllocation allocation3 = allocator.Allocate(128, alignment);
-        ASSERT_EQ(allocation3.GetInfo().mBlockOffset, 128u);
-        ASSERT_EQ(allocation3.GetOffset(), 0u);
-        ASSERT_EQ(allocation3.GetInfo().mMethod, AllocationMethod::kSubAllocated);
-
-        ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 2u);
-        ASSERT_NE(allocation2.GetResourceHeap(), allocation3.GetResourceHeap());
-
-        ResourceMemoryAllocation allocation4 = allocator.Allocate(128, alignment);
-        ASSERT_EQ(allocation4.GetInfo().mBlockOffset, 256u);
-        ASSERT_EQ(allocation4.GetOffset(), 0u);
-        ASSERT_EQ(allocation4.GetInfo().mMethod, AllocationMethod::kSubAllocated);
-
-        ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 3u);
-        ASSERT_NE(allocation3.GetResourceHeap(), allocation4.GetResourceHeap());
+    // Allocate |kNumOfHeaps| worth.
+    while (heaps.size() < kNumOfHeaps) {
+        ResourceMemoryAllocation allocation = allocator.Allocate(4);
+        ASSERT_EQ(allocation.GetInfo().mMethod, AllocationMethod::kSubAllocated);
+        heaps.insert(allocation.GetResourceHeap());
+        allocations.push_back(std::move(allocation));
     }
 
-    // Verify allocating a very large resource does not overflow.
-    TEST(BuddyMemoryAllocatorTests, AllocationOverflow) {
-        constexpr uint64_t heapSize = 128;
-        constexpr uint64_t maxBlockSize = 512;
-        PlaceholderBuddyResourceAllocator allocator(maxBlockSize, heapSize);
+    ASSERT_EQ(poolAllocator.GetPoolSizeForTesting(), 0u);
 
-        constexpr uint64_t largeBlock = (1ull << 63) + 1;
-        ResourceMemoryAllocation invalidAllocation = allocator.Allocate(largeBlock);
-        ASSERT_EQ(invalidAllocation.GetInfo().mMethod, AllocationMethod::kInvalid);
+    // Return the allocations to the pool.
+    for (ResourceMemoryAllocation& allocation : allocations) {
+        allocator.Deallocate(allocation);
     }
 
-    // Verify resource heaps will be reused from a pool.
-    TEST(BuddyMemoryAllocatorTests, ReuseFreedHeaps) {
-        constexpr uint64_t kHeapSize = 128;
-        constexpr uint64_t kMaxBlockSize = 4096;
+    ASSERT_EQ(poolAllocator.GetPoolSizeForTesting(), kNumOfHeaps);
 
-        PlaceholderResourceHeapAllocator heapAllocator;
-        PooledResourceMemoryAllocator poolAllocator(&heapAllocator);
-        PlaceholderBuddyResourceAllocator allocator(kMaxBlockSize, kHeapSize, &poolAllocator);
-
-        std::set<ResourceHeapBase*> heaps = {};
-        std::vector<ResourceMemoryAllocation> allocations = {};
-
-        constexpr uint32_t kNumOfAllocations = 100;
-
-        // Allocate |kNumOfAllocations|.
-        for (uint32_t i = 0; i < kNumOfAllocations; i++) {
-            ResourceMemoryAllocation allocation = allocator.Allocate(4);
-            ASSERT_EQ(allocation.GetInfo().mMethod, AllocationMethod::kSubAllocated);
-            heaps.insert(allocation.GetResourceHeap());
-            allocations.push_back(std::move(allocation));
-        }
-
-        ASSERT_EQ(poolAllocator.GetPoolSizeForTesting(), 0u);
-
-        // Return the allocations to the pool.
-        for (ResourceMemoryAllocation& allocation : allocations) {
-            allocator.Deallocate(allocation);
-        }
-
-        ASSERT_EQ(poolAllocator.GetPoolSizeForTesting(), heaps.size());
-
-        // Allocate again reusing the same heaps.
-        for (uint32_t i = 0; i < kNumOfAllocations; i++) {
-            ResourceMemoryAllocation allocation = allocator.Allocate(4);
-            ASSERT_EQ(allocation.GetInfo().mMethod, AllocationMethod::kSubAllocated);
-            ASSERT_FALSE(heaps.insert(allocation.GetResourceHeap()).second);
-        }
-
-        ASSERT_EQ(poolAllocator.GetPoolSizeForTesting(), 0u);
-    }
-
-    // Verify resource heaps that were reused from a pool can be destroyed.
-    TEST(BuddyMemoryAllocatorTests, DestroyHeaps) {
-        constexpr uint64_t kHeapSize = 128;
-        constexpr uint64_t kMaxBlockSize = 4096;
-
-        PlaceholderResourceHeapAllocator heapAllocator;
-        PooledResourceMemoryAllocator poolAllocator(&heapAllocator);
-        PlaceholderBuddyResourceAllocator allocator(kMaxBlockSize, kHeapSize, &poolAllocator);
-
-        std::set<ResourceHeapBase*> heaps = {};
-        std::vector<ResourceMemoryAllocation> allocations = {};
-
-        // Count by heap (vs number of allocations) to ensure there are exactly |kNumOfHeaps| worth
-        // of buffers. Otherwise, the heap may be reused if not full.
-        constexpr uint32_t kNumOfHeaps = 10;
-
-        // Allocate |kNumOfHeaps| worth.
-        while (heaps.size() < kNumOfHeaps) {
-            ResourceMemoryAllocation allocation = allocator.Allocate(4);
-            ASSERT_EQ(allocation.GetInfo().mMethod, AllocationMethod::kSubAllocated);
-            heaps.insert(allocation.GetResourceHeap());
-            allocations.push_back(std::move(allocation));
-        }
-
-        ASSERT_EQ(poolAllocator.GetPoolSizeForTesting(), 0u);
-
-        // Return the allocations to the pool.
-        for (ResourceMemoryAllocation& allocation : allocations) {
-            allocator.Deallocate(allocation);
-        }
-
-        ASSERT_EQ(poolAllocator.GetPoolSizeForTesting(), kNumOfHeaps);
-
-        // Make sure we can destroy the remaining heaps.
-        poolAllocator.DestroyPool();
-        ASSERT_EQ(poolAllocator.GetPoolSizeForTesting(), 0u);
-    }
+    // Make sure we can destroy the remaining heaps.
+    poolAllocator.DestroyPool();
+    ASSERT_EQ(poolAllocator.GetPoolSizeForTesting(), 0u);
+}
 
 }  // namespace dawn::native
diff --git a/src/dawn/tests/unittests/CommandAllocatorTests.cpp b/src/dawn/tests/unittests/CommandAllocatorTests.cpp
index cea1922..2c63327 100644
--- a/src/dawn/tests/unittests/CommandAllocatorTests.cpp
+++ b/src/dawn/tests/unittests/CommandAllocatorTests.cpp
@@ -16,493 +16,490 @@
 #include <utility>
 #include <vector>
 
-#include "gtest/gtest.h"
 #include "dawn/native/CommandAllocator.h"
+#include "gtest/gtest.h"
 
 namespace dawn::native {
 
-    // Definition of the command types used in the tests
-    enum class CommandType {
-        Draw,
-        Pipeline,
-        PushConstants,
-        Big,
-        Small,
-    };
+// Definition of the command types used in the tests
+enum class CommandType {
+    Draw,
+    Pipeline,
+    PushConstants,
+    Big,
+    Small,
+};
 
-    struct CommandDraw {
-        uint32_t first;
-        uint32_t count;
-    };
+struct CommandDraw {
+    uint32_t first;
+    uint32_t count;
+};
 
-    struct CommandPipeline {
-        uint64_t pipeline;
-        uint32_t attachmentPoint;
-    };
+struct CommandPipeline {
+    uint64_t pipeline;
+    uint32_t attachmentPoint;
+};
 
-    struct CommandPushConstants {
-        uint8_t size;
-        uint8_t offset;
-    };
+struct CommandPushConstants {
+    uint8_t size;
+    uint8_t offset;
+};
 
-    constexpr int kBigBufferSize = 65536;
+constexpr int kBigBufferSize = 65536;
 
-    struct CommandBig {
-        uint32_t buffer[kBigBufferSize];
-    };
+struct CommandBig {
+    uint32_t buffer[kBigBufferSize];
+};
 
-    struct CommandSmall {
-        uint16_t data;
-    };
+struct CommandSmall {
+    uint16_t data;
+};
 
-    // Test allocating nothing works
-    TEST(CommandAllocator, DoNothingAllocator) {
-        CommandAllocator allocator;
+// Test allocating nothing works
+TEST(CommandAllocator, DoNothingAllocator) {
+    CommandAllocator allocator;
+}
+
+// Test iterating over nothing works
+TEST(CommandAllocator, DoNothingAllocatorWithIterator) {
+    CommandAllocator allocator;
+    CommandIterator iterator(std::move(allocator));
+    iterator.MakeEmptyAsDataWasDestroyed();
+}
+
+// Test basic usage of allocator + iterator
+TEST(CommandAllocator, Basic) {
+    CommandAllocator allocator;
+
+    uint64_t myPipeline = 0xDEADBEEFBEEFDEAD;
+    uint32_t myAttachmentPoint = 2;
+    uint32_t myFirst = 42;
+    uint32_t myCount = 16;
+
+    {
+        CommandPipeline* pipeline = allocator.Allocate<CommandPipeline>(CommandType::Pipeline);
+        pipeline->pipeline = myPipeline;
+        pipeline->attachmentPoint = myAttachmentPoint;
+
+        CommandDraw* draw = allocator.Allocate<CommandDraw>(CommandType::Draw);
+        draw->first = myFirst;
+        draw->count = myCount;
     }
 
-    // Test iterating over nothing works
-    TEST(CommandAllocator, DoNothingAllocatorWithIterator) {
-        CommandAllocator allocator;
-        CommandIterator iterator(std::move(allocator));
-        iterator.MakeEmptyAsDataWasDestroyed();
-    }
-
-    // Test basic usage of allocator + iterator
-    TEST(CommandAllocator, Basic) {
-        CommandAllocator allocator;
-
-        uint64_t myPipeline = 0xDEADBEEFBEEFDEAD;
-        uint32_t myAttachmentPoint = 2;
-        uint32_t myFirst = 42;
-        uint32_t myCount = 16;
-
-        {
-            CommandPipeline* pipeline = allocator.Allocate<CommandPipeline>(CommandType::Pipeline);
-            pipeline->pipeline = myPipeline;
-            pipeline->attachmentPoint = myAttachmentPoint;
-
-            CommandDraw* draw = allocator.Allocate<CommandDraw>(CommandType::Draw);
-            draw->first = myFirst;
-            draw->count = myCount;
-        }
-
-        {
-            CommandIterator iterator(std::move(allocator));
-            CommandType type;
-
-            bool hasNext = iterator.NextCommandId(&type);
-            ASSERT_TRUE(hasNext);
-            ASSERT_EQ(type, CommandType::Pipeline);
-
-            CommandPipeline* pipeline = iterator.NextCommand<CommandPipeline>();
-            ASSERT_EQ(pipeline->pipeline, myPipeline);
-            ASSERT_EQ(pipeline->attachmentPoint, myAttachmentPoint);
-
-            hasNext = iterator.NextCommandId(&type);
-            ASSERT_TRUE(hasNext);
-            ASSERT_EQ(type, CommandType::Draw);
-
-            CommandDraw* draw = iterator.NextCommand<CommandDraw>();
-            ASSERT_EQ(draw->first, myFirst);
-            ASSERT_EQ(draw->count, myCount);
-
-            hasNext = iterator.NextCommandId(&type);
-            ASSERT_FALSE(hasNext);
-
-            iterator.MakeEmptyAsDataWasDestroyed();
-        }
-    }
-
-    // Test basic usage of allocator + iterator with data
-    TEST(CommandAllocator, BasicWithData) {
-        CommandAllocator allocator;
-
-        uint8_t mySize = 8;
-        uint8_t myOffset = 3;
-        uint32_t myValues[5] = {6, 42, 0xFFFFFFFF, 0, 54};
-
-        {
-            CommandPushConstants* pushConstants =
-                allocator.Allocate<CommandPushConstants>(CommandType::PushConstants);
-            pushConstants->size = mySize;
-            pushConstants->offset = myOffset;
-
-            uint32_t* values = allocator.AllocateData<uint32_t>(5);
-            for (size_t i = 0; i < 5; i++) {
-                values[i] = myValues[i];
-            }
-        }
-
-        {
-            CommandIterator iterator(std::move(allocator));
-            CommandType type;
-
-            bool hasNext = iterator.NextCommandId(&type);
-            ASSERT_TRUE(hasNext);
-            ASSERT_EQ(type, CommandType::PushConstants);
-
-            CommandPushConstants* pushConstants = iterator.NextCommand<CommandPushConstants>();
-            ASSERT_EQ(pushConstants->size, mySize);
-            ASSERT_EQ(pushConstants->offset, myOffset);
-
-            uint32_t* values = iterator.NextData<uint32_t>(5);
-            for (size_t i = 0; i < 5; i++) {
-                ASSERT_EQ(values[i], myValues[i]);
-            }
-
-            hasNext = iterator.NextCommandId(&type);
-            ASSERT_FALSE(hasNext);
-
-            iterator.MakeEmptyAsDataWasDestroyed();
-        }
-    }
-
-    // Test basic iterating several times
-    TEST(CommandAllocator, MultipleIterations) {
-        CommandAllocator allocator;
-
-        uint32_t myFirst = 42;
-        uint32_t myCount = 16;
-
-        {
-            CommandDraw* draw = allocator.Allocate<CommandDraw>(CommandType::Draw);
-            draw->first = myFirst;
-            draw->count = myCount;
-        }
-
-        {
-            CommandIterator iterator(std::move(allocator));
-            CommandType type;
-
-            // First iteration
-            bool hasNext = iterator.NextCommandId(&type);
-            ASSERT_TRUE(hasNext);
-            ASSERT_EQ(type, CommandType::Draw);
-
-            CommandDraw* draw = iterator.NextCommand<CommandDraw>();
-            ASSERT_EQ(draw->first, myFirst);
-            ASSERT_EQ(draw->count, myCount);
-
-            hasNext = iterator.NextCommandId(&type);
-            ASSERT_FALSE(hasNext);
-
-            // Second iteration
-            hasNext = iterator.NextCommandId(&type);
-            ASSERT_TRUE(hasNext);
-            ASSERT_EQ(type, CommandType::Draw);
-
-            draw = iterator.NextCommand<CommandDraw>();
-            ASSERT_EQ(draw->first, myFirst);
-            ASSERT_EQ(draw->count, myCount);
-
-            hasNext = iterator.NextCommandId(&type);
-            ASSERT_FALSE(hasNext);
-
-            iterator.MakeEmptyAsDataWasDestroyed();
-        }
-    }
-    // Test large commands work
-    TEST(CommandAllocator, LargeCommands) {
-        CommandAllocator allocator;
-
-        const int kCommandCount = 5;
-
-        uint32_t count = 0;
-        for (int i = 0; i < kCommandCount; i++) {
-            CommandBig* big = allocator.Allocate<CommandBig>(CommandType::Big);
-            for (int j = 0; j < kBigBufferSize; j++) {
-                big->buffer[j] = count++;
-            }
-        }
-
+    {
         CommandIterator iterator(std::move(allocator));
         CommandType type;
-        count = 0;
-        int numCommands = 0;
-        while (iterator.NextCommandId(&type)) {
-            ASSERT_EQ(type, CommandType::Big);
 
-            CommandBig* big = iterator.NextCommand<CommandBig>();
-            for (int i = 0; i < kBigBufferSize; i++) {
-                ASSERT_EQ(big->buffer[i], count);
-                count++;
-            }
-            numCommands++;
-        }
-        ASSERT_EQ(numCommands, kCommandCount);
+        bool hasNext = iterator.NextCommandId(&type);
+        ASSERT_TRUE(hasNext);
+        ASSERT_EQ(type, CommandType::Pipeline);
+
+        CommandPipeline* pipeline = iterator.NextCommand<CommandPipeline>();
+        ASSERT_EQ(pipeline->pipeline, myPipeline);
+        ASSERT_EQ(pipeline->attachmentPoint, myAttachmentPoint);
+
+        hasNext = iterator.NextCommandId(&type);
+        ASSERT_TRUE(hasNext);
+        ASSERT_EQ(type, CommandType::Draw);
+
+        CommandDraw* draw = iterator.NextCommand<CommandDraw>();
+        ASSERT_EQ(draw->first, myFirst);
+        ASSERT_EQ(draw->count, myCount);
+
+        hasNext = iterator.NextCommandId(&type);
+        ASSERT_FALSE(hasNext);
 
         iterator.MakeEmptyAsDataWasDestroyed();
     }
+}
 
-    // Test many small commands work
-    TEST(CommandAllocator, ManySmallCommands) {
-        CommandAllocator allocator;
+// Test basic usage of allocator + iterator with data
+TEST(CommandAllocator, BasicWithData) {
+    CommandAllocator allocator;
 
-        // Stay under max representable uint16_t
-        const int kCommandCount = 50000;
+    uint8_t mySize = 8;
+    uint8_t myOffset = 3;
+    uint32_t myValues[5] = {6, 42, 0xFFFFFFFF, 0, 54};
 
-        uint16_t count = 0;
-        for (int i = 0; i < kCommandCount; i++) {
-            CommandSmall* small = allocator.Allocate<CommandSmall>(CommandType::Small);
-            small->data = count++;
+    {
+        CommandPushConstants* pushConstants =
+            allocator.Allocate<CommandPushConstants>(CommandType::PushConstants);
+        pushConstants->size = mySize;
+        pushConstants->offset = myOffset;
+
+        uint32_t* values = allocator.AllocateData<uint32_t>(5);
+        for (size_t i = 0; i < 5; i++) {
+            values[i] = myValues[i];
         }
+    }
 
+    {
         CommandIterator iterator(std::move(allocator));
         CommandType type;
-        count = 0;
-        int numCommands = 0;
-        while (iterator.NextCommandId(&type)) {
-            ASSERT_EQ(type, CommandType::Small);
 
-            CommandSmall* small = iterator.NextCommand<CommandSmall>();
-            ASSERT_EQ(small->data, count);
+        bool hasNext = iterator.NextCommandId(&type);
+        ASSERT_TRUE(hasNext);
+        ASSERT_EQ(type, CommandType::PushConstants);
+
+        CommandPushConstants* pushConstants = iterator.NextCommand<CommandPushConstants>();
+        ASSERT_EQ(pushConstants->size, mySize);
+        ASSERT_EQ(pushConstants->offset, myOffset);
+
+        uint32_t* values = iterator.NextData<uint32_t>(5);
+        for (size_t i = 0; i < 5; i++) {
+            ASSERT_EQ(values[i], myValues[i]);
+        }
+
+        hasNext = iterator.NextCommandId(&type);
+        ASSERT_FALSE(hasNext);
+
+        iterator.MakeEmptyAsDataWasDestroyed();
+    }
+}
+
+// Test basic iterating several times
+TEST(CommandAllocator, MultipleIterations) {
+    CommandAllocator allocator;
+
+    uint32_t myFirst = 42;
+    uint32_t myCount = 16;
+
+    {
+        CommandDraw* draw = allocator.Allocate<CommandDraw>(CommandType::Draw);
+        draw->first = myFirst;
+        draw->count = myCount;
+    }
+
+    {
+        CommandIterator iterator(std::move(allocator));
+        CommandType type;
+
+        // First iteration
+        bool hasNext = iterator.NextCommandId(&type);
+        ASSERT_TRUE(hasNext);
+        ASSERT_EQ(type, CommandType::Draw);
+
+        CommandDraw* draw = iterator.NextCommand<CommandDraw>();
+        ASSERT_EQ(draw->first, myFirst);
+        ASSERT_EQ(draw->count, myCount);
+
+        hasNext = iterator.NextCommandId(&type);
+        ASSERT_FALSE(hasNext);
+
+        // Second iteration
+        hasNext = iterator.NextCommandId(&type);
+        ASSERT_TRUE(hasNext);
+        ASSERT_EQ(type, CommandType::Draw);
+
+        draw = iterator.NextCommand<CommandDraw>();
+        ASSERT_EQ(draw->first, myFirst);
+        ASSERT_EQ(draw->count, myCount);
+
+        hasNext = iterator.NextCommandId(&type);
+        ASSERT_FALSE(hasNext);
+
+        iterator.MakeEmptyAsDataWasDestroyed();
+    }
+}
+// Test large commands work
+TEST(CommandAllocator, LargeCommands) {
+    CommandAllocator allocator;
+
+    const int kCommandCount = 5;
+
+    uint32_t count = 0;
+    for (int i = 0; i < kCommandCount; i++) {
+        CommandBig* big = allocator.Allocate<CommandBig>(CommandType::Big);
+        for (int j = 0; j < kBigBufferSize; j++) {
+            big->buffer[j] = count++;
+        }
+    }
+
+    CommandIterator iterator(std::move(allocator));
+    CommandType type;
+    count = 0;
+    int numCommands = 0;
+    while (iterator.NextCommandId(&type)) {
+        ASSERT_EQ(type, CommandType::Big);
+
+        CommandBig* big = iterator.NextCommand<CommandBig>();
+        for (int i = 0; i < kBigBufferSize; i++) {
+            ASSERT_EQ(big->buffer[i], count);
             count++;
-            numCommands++;
         }
-        ASSERT_EQ(numCommands, kCommandCount);
+        numCommands++;
+    }
+    ASSERT_EQ(numCommands, kCommandCount);
+
+    iterator.MakeEmptyAsDataWasDestroyed();
+}
+
+// Test many small commands work
+TEST(CommandAllocator, ManySmallCommands) {
+    CommandAllocator allocator;
+
+    // Stay under max representable uint16_t
+    const int kCommandCount = 50000;
+
+    uint16_t count = 0;
+    for (int i = 0; i < kCommandCount; i++) {
+        CommandSmall* small = allocator.Allocate<CommandSmall>(CommandType::Small);
+        small->data = count++;
+    }
+
+    CommandIterator iterator(std::move(allocator));
+    CommandType type;
+    count = 0;
+    int numCommands = 0;
+    while (iterator.NextCommandId(&type)) {
+        ASSERT_EQ(type, CommandType::Small);
+
+        CommandSmall* small = iterator.NextCommand<CommandSmall>();
+        ASSERT_EQ(small->data, count);
+        count++;
+        numCommands++;
+    }
+    ASSERT_EQ(numCommands, kCommandCount);
+
+    iterator.MakeEmptyAsDataWasDestroyed();
+}
+
+/*        ________
+ *       /        \
+ *       | POUIC! |
+ *       \_ ______/
+ *         v
+ *    ()_()
+ *    (O.o)
+ *    (> <)o
+ */
+
+// Test usage of iterator.Reset
+TEST(CommandAllocator, IteratorReset) {
+    CommandAllocator allocator;
+
+    uint64_t myPipeline = 0xDEADBEEFBEEFDEAD;
+    uint32_t myAttachmentPoint = 2;
+    uint32_t myFirst = 42;
+    uint32_t myCount = 16;
+
+    {
+        CommandPipeline* pipeline = allocator.Allocate<CommandPipeline>(CommandType::Pipeline);
+        pipeline->pipeline = myPipeline;
+        pipeline->attachmentPoint = myAttachmentPoint;
+
+        CommandDraw* draw = allocator.Allocate<CommandDraw>(CommandType::Draw);
+        draw->first = myFirst;
+        draw->count = myCount;
+    }
+
+    {
+        CommandIterator iterator(std::move(allocator));
+        CommandType type;
+
+        bool hasNext = iterator.NextCommandId(&type);
+        ASSERT_TRUE(hasNext);
+        ASSERT_EQ(type, CommandType::Pipeline);
+
+        CommandPipeline* pipeline = iterator.NextCommand<CommandPipeline>();
+        ASSERT_EQ(pipeline->pipeline, myPipeline);
+        ASSERT_EQ(pipeline->attachmentPoint, myAttachmentPoint);
+
+        iterator.Reset();
+
+        hasNext = iterator.NextCommandId(&type);
+        ASSERT_TRUE(hasNext);
+        ASSERT_EQ(type, CommandType::Pipeline);
+
+        pipeline = iterator.NextCommand<CommandPipeline>();
+        ASSERT_EQ(pipeline->pipeline, myPipeline);
+        ASSERT_EQ(pipeline->attachmentPoint, myAttachmentPoint);
+
+        hasNext = iterator.NextCommandId(&type);
+        ASSERT_TRUE(hasNext);
+        ASSERT_EQ(type, CommandType::Draw);
+
+        CommandDraw* draw = iterator.NextCommand<CommandDraw>();
+        ASSERT_EQ(draw->first, myFirst);
+        ASSERT_EQ(draw->count, myCount);
+
+        hasNext = iterator.NextCommandId(&type);
+        ASSERT_FALSE(hasNext);
 
         iterator.MakeEmptyAsDataWasDestroyed();
     }
+}
 
-    /*        ________
-     *       /        \
-     *       | POUIC! |
-     *       \_ ______/
-     *         v
-     *    ()_()
-     *    (O.o)
-     *    (> <)o
-     */
-
-    // Test usage of iterator.Reset
-    TEST(CommandAllocator, IteratorReset) {
+// Test iterating empty iterators
+TEST(CommandAllocator, EmptyIterator) {
+    {
         CommandAllocator allocator;
+        CommandIterator iterator(std::move(allocator));
 
-        uint64_t myPipeline = 0xDEADBEEFBEEFDEAD;
-        uint32_t myAttachmentPoint = 2;
-        uint32_t myFirst = 42;
-        uint32_t myCount = 16;
+        CommandType type;
+        bool hasNext = iterator.NextCommandId(&type);
+        ASSERT_FALSE(hasNext);
 
-        {
+        iterator.MakeEmptyAsDataWasDestroyed();
+    }
+    {
+        CommandAllocator allocator;
+        CommandIterator iterator1(std::move(allocator));
+        CommandIterator iterator2(std::move(iterator1));
+
+        CommandType type;
+        bool hasNext = iterator2.NextCommandId(&type);
+        ASSERT_FALSE(hasNext);
+
+        iterator1.MakeEmptyAsDataWasDestroyed();
+        iterator2.MakeEmptyAsDataWasDestroyed();
+    }
+    {
+        CommandIterator iterator1;
+        CommandIterator iterator2(std::move(iterator1));
+
+        CommandType type;
+        bool hasNext = iterator2.NextCommandId(&type);
+        ASSERT_FALSE(hasNext);
+
+        iterator1.MakeEmptyAsDataWasDestroyed();
+        iterator2.MakeEmptyAsDataWasDestroyed();
+    }
+}
+
+template <size_t A>
+struct alignas(A) AlignedStruct {
+    char placeholder;
+};
+
+// Test for overflows in Allocate's computations, size 1 variant
+TEST(CommandAllocator, AllocationOverflow_1) {
+    CommandAllocator allocator;
+    AlignedStruct<1>* data =
+        allocator.AllocateData<AlignedStruct<1>>(std::numeric_limits<size_t>::max() / 1);
+    ASSERT_EQ(data, nullptr);
+}
+
+// Test for overflows in Allocate's computations, size 2 variant
+TEST(CommandAllocator, AllocationOverflow_2) {
+    CommandAllocator allocator;
+    AlignedStruct<2>* data =
+        allocator.AllocateData<AlignedStruct<2>>(std::numeric_limits<size_t>::max() / 2);
+    ASSERT_EQ(data, nullptr);
+}
+
+// Test for overflows in Allocate's computations, size 4 variant
+TEST(CommandAllocator, AllocationOverflow_4) {
+    CommandAllocator allocator;
+    AlignedStruct<4>* data =
+        allocator.AllocateData<AlignedStruct<4>>(std::numeric_limits<size_t>::max() / 4);
+    ASSERT_EQ(data, nullptr);
+}
+
+// Test for overflows in Allocate's computations, size 8 variant
+TEST(CommandAllocator, AllocationOverflow_8) {
+    CommandAllocator allocator;
+    AlignedStruct<8>* data =
+        allocator.AllocateData<AlignedStruct<8>>(std::numeric_limits<size_t>::max() / 8);
+    ASSERT_EQ(data, nullptr);
+}
+
+template <int DefaultValue>
+struct IntWithDefault {
+    IntWithDefault() : value(DefaultValue) {}
+
+    int value;
+};
+
+// Test that the allocator correctly default-initializes data for Allocate
+TEST(CommandAllocator, AllocateDefaultInitializes) {
+    CommandAllocator allocator;
+
+    IntWithDefault<42>* int42 = allocator.Allocate<IntWithDefault<42>>(CommandType::Draw);
+    ASSERT_EQ(int42->value, 42);
+
+    IntWithDefault<43>* int43 = allocator.Allocate<IntWithDefault<43>>(CommandType::Draw);
+    ASSERT_EQ(int43->value, 43);
+
+    IntWithDefault<44>* int44 = allocator.Allocate<IntWithDefault<44>>(CommandType::Draw);
+    ASSERT_EQ(int44->value, 44);
+
+    CommandIterator iterator(std::move(allocator));
+    iterator.MakeEmptyAsDataWasDestroyed();
+}
+
+// Test that the allocator correctly default-initializes data for AllocateData
+TEST(CommandAllocator, AllocateDataDefaultInitializes) {
+    CommandAllocator allocator;
+
+    IntWithDefault<33>* int33 = allocator.AllocateData<IntWithDefault<33>>(1);
+    ASSERT_EQ(int33[0].value, 33);
+
+    IntWithDefault<34>* int34 = allocator.AllocateData<IntWithDefault<34>>(2);
+    ASSERT_EQ(int34[0].value, 34);
+    ASSERT_EQ(int34[0].value, 34);
+
+    IntWithDefault<35>* int35 = allocator.AllocateData<IntWithDefault<35>>(3);
+    ASSERT_EQ(int35[0].value, 35);
+    ASSERT_EQ(int35[1].value, 35);
+    ASSERT_EQ(int35[2].value, 35);
+
+    CommandIterator iterator(std::move(allocator));
+    iterator.MakeEmptyAsDataWasDestroyed();
+}
+
+// Tests flattening of multiple CommandAllocators into a single CommandIterator using
+// AcquireCommandBlocks.
+TEST(CommandAllocator, AcquireCommandBlocks) {
+    constexpr size_t kNumAllocators = 2;
+    constexpr size_t kNumCommandsPerAllocator = 2;
+    const uint64_t pipelines[kNumAllocators][kNumCommandsPerAllocator] = {
+        {0xDEADBEEFBEEFDEAD, 0xC0FFEEF00DC0FFEE},
+        {0x1337C0DE1337C0DE, 0xCAFEFACEFACECAFE},
+    };
+    const uint32_t attachmentPoints[kNumAllocators][kNumCommandsPerAllocator] = {{1, 2}, {3, 4}};
+    const uint32_t firsts[kNumAllocators][kNumCommandsPerAllocator] = {{42, 43}, {5, 6}};
+    const uint32_t counts[kNumAllocators][kNumCommandsPerAllocator] = {{16, 32}, {4, 8}};
+
+    std::vector<CommandAllocator> allocators(kNumAllocators);
+    for (size_t j = 0; j < kNumAllocators; ++j) {
+        CommandAllocator& allocator = allocators[j];
+        for (size_t i = 0; i < kNumCommandsPerAllocator; ++i) {
             CommandPipeline* pipeline = allocator.Allocate<CommandPipeline>(CommandType::Pipeline);
-            pipeline->pipeline = myPipeline;
-            pipeline->attachmentPoint = myAttachmentPoint;
+            pipeline->pipeline = pipelines[j][i];
+            pipeline->attachmentPoint = attachmentPoints[j][i];
 
             CommandDraw* draw = allocator.Allocate<CommandDraw>(CommandType::Draw);
-            draw->first = myFirst;
-            draw->count = myCount;
+            draw->first = firsts[j][i];
+            draw->count = counts[j][i];
         }
+    }
 
-        {
-            CommandIterator iterator(std::move(allocator));
+    CommandIterator iterator;
+    iterator.AcquireCommandBlocks(std::move(allocators));
+    for (size_t j = 0; j < kNumAllocators; ++j) {
+        for (size_t i = 0; i < kNumCommandsPerAllocator; ++i) {
             CommandType type;
-
             bool hasNext = iterator.NextCommandId(&type);
             ASSERT_TRUE(hasNext);
             ASSERT_EQ(type, CommandType::Pipeline);
 
             CommandPipeline* pipeline = iterator.NextCommand<CommandPipeline>();
-            ASSERT_EQ(pipeline->pipeline, myPipeline);
-            ASSERT_EQ(pipeline->attachmentPoint, myAttachmentPoint);
-
-            iterator.Reset();
-
-            hasNext = iterator.NextCommandId(&type);
-            ASSERT_TRUE(hasNext);
-            ASSERT_EQ(type, CommandType::Pipeline);
-
-            pipeline = iterator.NextCommand<CommandPipeline>();
-            ASSERT_EQ(pipeline->pipeline, myPipeline);
-            ASSERT_EQ(pipeline->attachmentPoint, myAttachmentPoint);
+            ASSERT_EQ(pipeline->pipeline, pipelines[j][i]);
+            ASSERT_EQ(pipeline->attachmentPoint, attachmentPoints[j][i]);
 
             hasNext = iterator.NextCommandId(&type);
             ASSERT_TRUE(hasNext);
             ASSERT_EQ(type, CommandType::Draw);
 
             CommandDraw* draw = iterator.NextCommand<CommandDraw>();
-            ASSERT_EQ(draw->first, myFirst);
-            ASSERT_EQ(draw->count, myCount);
-
-            hasNext = iterator.NextCommandId(&type);
-            ASSERT_FALSE(hasNext);
-
-            iterator.MakeEmptyAsDataWasDestroyed();
+            ASSERT_EQ(draw->first, firsts[j][i]);
+            ASSERT_EQ(draw->count, counts[j][i]);
         }
     }
-
-    // Test iterating empty iterators
-    TEST(CommandAllocator, EmptyIterator) {
-        {
-            CommandAllocator allocator;
-            CommandIterator iterator(std::move(allocator));
-
-            CommandType type;
-            bool hasNext = iterator.NextCommandId(&type);
-            ASSERT_FALSE(hasNext);
-
-            iterator.MakeEmptyAsDataWasDestroyed();
-        }
-        {
-            CommandAllocator allocator;
-            CommandIterator iterator1(std::move(allocator));
-            CommandIterator iterator2(std::move(iterator1));
-
-            CommandType type;
-            bool hasNext = iterator2.NextCommandId(&type);
-            ASSERT_FALSE(hasNext);
-
-            iterator1.MakeEmptyAsDataWasDestroyed();
-            iterator2.MakeEmptyAsDataWasDestroyed();
-        }
-        {
-            CommandIterator iterator1;
-            CommandIterator iterator2(std::move(iterator1));
-
-            CommandType type;
-            bool hasNext = iterator2.NextCommandId(&type);
-            ASSERT_FALSE(hasNext);
-
-            iterator1.MakeEmptyAsDataWasDestroyed();
-            iterator2.MakeEmptyAsDataWasDestroyed();
-        }
-    }
-
-    template <size_t A>
-    struct alignas(A) AlignedStruct {
-        char placeholder;
-    };
-
-    // Test for overflows in Allocate's computations, size 1 variant
-    TEST(CommandAllocator, AllocationOverflow_1) {
-        CommandAllocator allocator;
-        AlignedStruct<1>* data =
-            allocator.AllocateData<AlignedStruct<1>>(std::numeric_limits<size_t>::max() / 1);
-        ASSERT_EQ(data, nullptr);
-    }
-
-    // Test for overflows in Allocate's computations, size 2 variant
-    TEST(CommandAllocator, AllocationOverflow_2) {
-        CommandAllocator allocator;
-        AlignedStruct<2>* data =
-            allocator.AllocateData<AlignedStruct<2>>(std::numeric_limits<size_t>::max() / 2);
-        ASSERT_EQ(data, nullptr);
-    }
-
-    // Test for overflows in Allocate's computations, size 4 variant
-    TEST(CommandAllocator, AllocationOverflow_4) {
-        CommandAllocator allocator;
-        AlignedStruct<4>* data =
-            allocator.AllocateData<AlignedStruct<4>>(std::numeric_limits<size_t>::max() / 4);
-        ASSERT_EQ(data, nullptr);
-    }
-
-    // Test for overflows in Allocate's computations, size 8 variant
-    TEST(CommandAllocator, AllocationOverflow_8) {
-        CommandAllocator allocator;
-        AlignedStruct<8>* data =
-            allocator.AllocateData<AlignedStruct<8>>(std::numeric_limits<size_t>::max() / 8);
-        ASSERT_EQ(data, nullptr);
-    }
-
-    template <int DefaultValue>
-    struct IntWithDefault {
-        IntWithDefault() : value(DefaultValue) {
-        }
-
-        int value;
-    };
-
-    // Test that the allcator correctly defaults initalizes data for Allocate
-    TEST(CommandAllocator, AllocateDefaultInitializes) {
-        CommandAllocator allocator;
-
-        IntWithDefault<42>* int42 = allocator.Allocate<IntWithDefault<42>>(CommandType::Draw);
-        ASSERT_EQ(int42->value, 42);
-
-        IntWithDefault<43>* int43 = allocator.Allocate<IntWithDefault<43>>(CommandType::Draw);
-        ASSERT_EQ(int43->value, 43);
-
-        IntWithDefault<44>* int44 = allocator.Allocate<IntWithDefault<44>>(CommandType::Draw);
-        ASSERT_EQ(int44->value, 44);
-
-        CommandIterator iterator(std::move(allocator));
-        iterator.MakeEmptyAsDataWasDestroyed();
-    }
-
-    // Test that the allocator correctly default-initalizes data for AllocateData
-    TEST(CommandAllocator, AllocateDataDefaultInitializes) {
-        CommandAllocator allocator;
-
-        IntWithDefault<33>* int33 = allocator.AllocateData<IntWithDefault<33>>(1);
-        ASSERT_EQ(int33[0].value, 33);
-
-        IntWithDefault<34>* int34 = allocator.AllocateData<IntWithDefault<34>>(2);
-        ASSERT_EQ(int34[0].value, 34);
-        ASSERT_EQ(int34[0].value, 34);
-
-        IntWithDefault<35>* int35 = allocator.AllocateData<IntWithDefault<35>>(3);
-        ASSERT_EQ(int35[0].value, 35);
-        ASSERT_EQ(int35[1].value, 35);
-        ASSERT_EQ(int35[2].value, 35);
-
-        CommandIterator iterator(std::move(allocator));
-        iterator.MakeEmptyAsDataWasDestroyed();
-    }
-
-    // Tests flattening of multiple CommandAllocators into a single CommandIterator using
-    // AcquireCommandBlocks.
-    TEST(CommandAllocator, AcquireCommandBlocks) {
-        constexpr size_t kNumAllocators = 2;
-        constexpr size_t kNumCommandsPerAllocator = 2;
-        const uint64_t pipelines[kNumAllocators][kNumCommandsPerAllocator] = {
-            {0xDEADBEEFBEEFDEAD, 0xC0FFEEF00DC0FFEE},
-            {0x1337C0DE1337C0DE, 0xCAFEFACEFACECAFE},
-        };
-        const uint32_t attachmentPoints[kNumAllocators][kNumCommandsPerAllocator] = {{1, 2},
-                                                                                     {3, 4}};
-        const uint32_t firsts[kNumAllocators][kNumCommandsPerAllocator] = {{42, 43}, {5, 6}};
-        const uint32_t counts[kNumAllocators][kNumCommandsPerAllocator] = {{16, 32}, {4, 8}};
-
-        std::vector<CommandAllocator> allocators(kNumAllocators);
-        for (size_t j = 0; j < kNumAllocators; ++j) {
-            CommandAllocator& allocator = allocators[j];
-            for (size_t i = 0; i < kNumCommandsPerAllocator; ++i) {
-                CommandPipeline* pipeline =
-                    allocator.Allocate<CommandPipeline>(CommandType::Pipeline);
-                pipeline->pipeline = pipelines[j][i];
-                pipeline->attachmentPoint = attachmentPoints[j][i];
-
-                CommandDraw* draw = allocator.Allocate<CommandDraw>(CommandType::Draw);
-                draw->first = firsts[j][i];
-                draw->count = counts[j][i];
-            }
-        }
-
-        CommandIterator iterator;
-        iterator.AcquireCommandBlocks(std::move(allocators));
-        for (size_t j = 0; j < kNumAllocators; ++j) {
-            for (size_t i = 0; i < kNumCommandsPerAllocator; ++i) {
-                CommandType type;
-                bool hasNext = iterator.NextCommandId(&type);
-                ASSERT_TRUE(hasNext);
-                ASSERT_EQ(type, CommandType::Pipeline);
-
-                CommandPipeline* pipeline = iterator.NextCommand<CommandPipeline>();
-                ASSERT_EQ(pipeline->pipeline, pipelines[j][i]);
-                ASSERT_EQ(pipeline->attachmentPoint, attachmentPoints[j][i]);
-
-                hasNext = iterator.NextCommandId(&type);
-                ASSERT_TRUE(hasNext);
-                ASSERT_EQ(type, CommandType::Draw);
-
-                CommandDraw* draw = iterator.NextCommand<CommandDraw>();
-                ASSERT_EQ(draw->first, firsts[j][i]);
-                ASSERT_EQ(draw->count, counts[j][i]);
-            }
-        }
-        CommandType type;
-        ASSERT_FALSE(iterator.NextCommandId(&type));
-        iterator.MakeEmptyAsDataWasDestroyed();
-    }
+    CommandType type;
+    ASSERT_FALSE(iterator.NextCommandId(&type));
+    iterator.MakeEmptyAsDataWasDestroyed();
+}
 
 }  // namespace dawn::native
diff --git a/src/dawn/tests/unittests/ConcurrentCacheTests.cpp b/src/dawn/tests/unittests/ConcurrentCacheTests.cpp
index 9687a3c..1d4f91d 100644
--- a/src/dawn/tests/unittests/ConcurrentCacheTests.cpp
+++ b/src/dawn/tests/unittests/ConcurrentCacheTests.cpp
@@ -22,37 +22,31 @@
 #include "gtest/gtest.h"
 
 namespace {
-    class SimpleCachedObject {
-      public:
-        explicit SimpleCachedObject(size_t value) : mValue(value) {
+class SimpleCachedObject {
+  public:
+    explicit SimpleCachedObject(size_t value) : mValue(value) {}
+
+    size_t GetValue() const { return mValue; }
+
+    struct EqualityFunc {
+        bool operator()(const SimpleCachedObject* a, const SimpleCachedObject* b) const {
+            return a->mValue == b->mValue;
         }
-
-        size_t GetValue() const {
-            return mValue;
-        }
-
-        struct EqualityFunc {
-            bool operator()(const SimpleCachedObject* a, const SimpleCachedObject* b) const {
-                return a->mValue == b->mValue;
-            }
-        };
-
-        struct HashFunc {
-            size_t operator()(const SimpleCachedObject* obj) const {
-                return obj->mValue;
-            }
-        };
-
-      private:
-        size_t mValue;
     };
 
+    struct HashFunc {
+        size_t operator()(const SimpleCachedObject* obj) const { return obj->mValue; }
+    };
+
+  private:
+    size_t mValue;
+};
+
 }  // anonymous namespace
 
 class ConcurrentCacheTest : public testing::Test {
   public:
-    ConcurrentCacheTest() : mPool(mPlatform.CreateWorkerTaskPool()), mTaskManager(mPool.get()) {
-    }
+    ConcurrentCacheTest() : mPool(mPlatform.CreateWorkerTaskPool()), mTaskManager(mPool.get()) {}
 
   protected:
     dawn::platform::Platform mPlatform;
diff --git a/src/dawn/tests/unittests/EnumClassBitmasksTests.cpp b/src/dawn/tests/unittests/EnumClassBitmasksTests.cpp
index 26849bd..bb2f916 100644
--- a/src/dawn/tests/unittests/EnumClassBitmasksTests.cpp
+++ b/src/dawn/tests/unittests/EnumClassBitmasksTests.cpp
@@ -18,76 +18,76 @@
 
 namespace dawn {
 
-    enum class Color : uint32_t {
-        R = 1,
-        G = 2,
-        B = 4,
-        A = 8,
-    };
+enum class Color : uint32_t {
+    R = 1,
+    G = 2,
+    B = 4,
+    A = 8,
+};
 
-    template <>
-    struct IsDawnBitmask<Color> {
-        static constexpr bool enable = true;
-    };
+template <>
+struct IsDawnBitmask<Color> {
+    static constexpr bool enable = true;
+};
 
-    TEST(BitmaskTests, BasicOperations) {
-        Color test1 = Color::R | Color::G;
-        ASSERT_EQ(1u | 2u, static_cast<uint32_t>(test1));
+TEST(BitmaskTests, BasicOperations) {
+    Color test1 = Color::R | Color::G;
+    ASSERT_EQ(1u | 2u, static_cast<uint32_t>(test1));
 
-        Color test2 = test1 ^ (Color::R | Color::A);
-        ASSERT_EQ(2u | 8u, static_cast<uint32_t>(test2));
+    Color test2 = test1 ^ (Color::R | Color::A);
+    ASSERT_EQ(2u | 8u, static_cast<uint32_t>(test2));
 
-        Color test3 = test2 & Color::A;
-        ASSERT_EQ(8u, static_cast<uint32_t>(test3));
+    Color test3 = test2 & Color::A;
+    ASSERT_EQ(8u, static_cast<uint32_t>(test3));
 
-        Color test4 = ~test3;
-        ASSERT_EQ(~uint32_t(8), static_cast<uint32_t>(test4));
+    Color test4 = ~test3;
+    ASSERT_EQ(~uint32_t(8), static_cast<uint32_t>(test4));
+}
+
+TEST(BitmaskTests, AssignOperations) {
+    Color test1 = Color::R;
+    test1 |= Color::G;
+    ASSERT_EQ(1u | 2u, static_cast<uint32_t>(test1));
+
+    Color test2 = test1;
+    test2 ^= (Color::R | Color::A);
+    ASSERT_EQ(2u | 8u, static_cast<uint32_t>(test2));
+
+    Color test3 = test2;
+    test3 &= Color::A;
+    ASSERT_EQ(8u, static_cast<uint32_t>(test3));
+}
+
+TEST(BitmaskTests, BoolConversion) {
+    bool test1 = Color::R | Color::G;
+    ASSERT_TRUE(test1);
+
+    bool test2 = Color::R & Color::G;
+    ASSERT_FALSE(test2);
+
+    bool test3 = Color::R ^ Color::G;
+    ASSERT_TRUE(test3);
+
+    if (Color::R & ~Color::R) {
+        ASSERT_TRUE(false);
     }
+}
 
-    TEST(BitmaskTests, AssignOperations) {
-        Color test1 = Color::R;
-        test1 |= Color::G;
-        ASSERT_EQ(1u | 2u, static_cast<uint32_t>(test1));
+TEST(BitmaskTests, ThreeOrs) {
+    Color c = Color::R | Color::G | Color::B;
+    ASSERT_EQ(7u, static_cast<uint32_t>(c));
+}
 
-        Color test2 = test1;
-        test2 ^= (Color::R | Color::A);
-        ASSERT_EQ(2u | 8u, static_cast<uint32_t>(test2));
-
-        Color test3 = test2;
-        test3 &= Color::A;
-        ASSERT_EQ(8u, static_cast<uint32_t>(test3));
-    }
-
-    TEST(BitmaskTests, BoolConversion) {
-        bool test1 = Color::R | Color::G;
-        ASSERT_TRUE(test1);
-
-        bool test2 = Color::R & Color::G;
-        ASSERT_FALSE(test2);
-
-        bool test3 = Color::R ^ Color::G;
-        ASSERT_TRUE(test3);
-
-        if (Color::R & ~Color::R) {
-            ASSERT_TRUE(false);
-        }
-    }
-
-    TEST(BitmaskTests, ThreeOrs) {
-        Color c = Color::R | Color::G | Color::B;
-        ASSERT_EQ(7u, static_cast<uint32_t>(c));
-    }
-
-    TEST(BitmaskTests, ZeroOrOneBits) {
-        Color zero = static_cast<Color>(0);
-        ASSERT_TRUE(HasZeroOrOneBits(zero));
-        ASSERT_TRUE(HasZeroOrOneBits(Color::R));
-        ASSERT_TRUE(HasZeroOrOneBits(Color::G));
-        ASSERT_TRUE(HasZeroOrOneBits(Color::B));
-        ASSERT_TRUE(HasZeroOrOneBits(Color::A));
-        ASSERT_FALSE(HasZeroOrOneBits(static_cast<Color>(Color::R | Color::G)));
-        ASSERT_FALSE(HasZeroOrOneBits(static_cast<Color>(Color::G | Color::B)));
-        ASSERT_FALSE(HasZeroOrOneBits(static_cast<Color>(Color::B | Color::A)));
-    }
+TEST(BitmaskTests, ZeroOrOneBits) {
+    Color zero = static_cast<Color>(0);
+    ASSERT_TRUE(HasZeroOrOneBits(zero));
+    ASSERT_TRUE(HasZeroOrOneBits(Color::R));
+    ASSERT_TRUE(HasZeroOrOneBits(Color::G));
+    ASSERT_TRUE(HasZeroOrOneBits(Color::B));
+    ASSERT_TRUE(HasZeroOrOneBits(Color::A));
+    ASSERT_FALSE(HasZeroOrOneBits(static_cast<Color>(Color::R | Color::G)));
+    ASSERT_FALSE(HasZeroOrOneBits(static_cast<Color>(Color::G | Color::B)));
+    ASSERT_FALSE(HasZeroOrOneBits(static_cast<Color>(Color::B | Color::A)));
+}
 
 }  // namespace dawn
diff --git a/src/dawn/tests/unittests/EnumMaskIteratorTests.cpp b/src/dawn/tests/unittests/EnumMaskIteratorTests.cpp
index b6c6727..fec0376 100644
--- a/src/dawn/tests/unittests/EnumMaskIteratorTests.cpp
+++ b/src/dawn/tests/unittests/EnumMaskIteratorTests.cpp
@@ -18,55 +18,55 @@
 
 namespace dawn::native {
 
-    enum class TestAspect : uint8_t {
-        Color = 1,
-        Depth = 2,
-        Stencil = 4,
-    };
+enum class TestAspect : uint8_t {
+    Color = 1,
+    Depth = 2,
+    Stencil = 4,
+};
 
-    template <>
-    struct EnumBitmaskSize<TestAspect> {
-        static constexpr unsigned value = 3;
-    };
+template <>
+struct EnumBitmaskSize<TestAspect> {
+    static constexpr unsigned value = 3;
+};
 
 }  // namespace dawn::native
 
 namespace dawn {
 
-    template <>
-    struct IsDawnBitmask<dawn::native::TestAspect> {
-        static constexpr bool enable = true;
-    };
+template <>
+struct IsDawnBitmask<dawn::native::TestAspect> {
+    static constexpr bool enable = true;
+};
 
 }  // namespace dawn
 
 namespace dawn::native {
 
-    static_assert(EnumBitmaskSize<TestAspect>::value == 3);
+static_assert(EnumBitmaskSize<TestAspect>::value == 3);
 
-    TEST(EnumMaskIteratorTests, None) {
-        for (TestAspect aspect : IterateEnumMask(static_cast<TestAspect>(0))) {
-            FAIL();
-            DAWN_UNUSED(aspect);
-        }
+TEST(EnumMaskIteratorTests, None) {
+    for (TestAspect aspect : IterateEnumMask(static_cast<TestAspect>(0))) {
+        FAIL();
+        DAWN_UNUSED(aspect);
     }
+}
 
-    TEST(EnumMaskIteratorTests, All) {
-        TestAspect expected[] = {TestAspect::Color, TestAspect::Depth, TestAspect::Stencil};
-        uint32_t i = 0;
-        TestAspect aspects = TestAspect::Color | TestAspect::Depth | TestAspect::Stencil;
-        for (TestAspect aspect : IterateEnumMask(aspects)) {
-            EXPECT_EQ(aspect, expected[i++]);
-        }
+TEST(EnumMaskIteratorTests, All) {
+    TestAspect expected[] = {TestAspect::Color, TestAspect::Depth, TestAspect::Stencil};
+    uint32_t i = 0;
+    TestAspect aspects = TestAspect::Color | TestAspect::Depth | TestAspect::Stencil;
+    for (TestAspect aspect : IterateEnumMask(aspects)) {
+        EXPECT_EQ(aspect, expected[i++]);
     }
+}
 
-    TEST(EnumMaskIteratorTests, Partial) {
-        TestAspect expected[] = {TestAspect::Color, TestAspect::Stencil};
-        uint32_t i = 0;
-        TestAspect aspects = TestAspect::Stencil | TestAspect::Color;
-        for (TestAspect aspect : IterateEnumMask(aspects)) {
-            EXPECT_EQ(aspect, expected[i++]);
-        }
+TEST(EnumMaskIteratorTests, Partial) {
+    TestAspect expected[] = {TestAspect::Color, TestAspect::Stencil};
+    uint32_t i = 0;
+    TestAspect aspects = TestAspect::Stencil | TestAspect::Color;
+    for (TestAspect aspect : IterateEnumMask(aspects)) {
+        EXPECT_EQ(aspect, expected[i++]);
     }
+}
 
 }  // namespace dawn::native
diff --git a/src/dawn/tests/unittests/ErrorTests.cpp b/src/dawn/tests/unittests/ErrorTests.cpp
index f856613..8a3f568 100644
--- a/src/dawn/tests/unittests/ErrorTests.cpp
+++ b/src/dawn/tests/unittests/ErrorTests.cpp
@@ -18,346 +18,348 @@
 #include "dawn/native/ErrorData.h"
 #include "gtest/gtest.h"
 
-namespace dawn::native { namespace {
+namespace dawn::native {
+namespace {
 
-    int placeholderSuccess = 0xbeef;
-    const char* placeholderErrorMessage = "I am an error message :3";
+int placeholderSuccess = 0xbeef;
+const char* placeholderErrorMessage = "I am an error message :3";
 
-    // Check returning a success MaybeError with {};
-    TEST(ErrorTests, Error_Success) {
-        auto ReturnSuccess = []() -> MaybeError { return {}; };
+// Check returning a success MaybeError with {};
+TEST(ErrorTests, Error_Success) {
+    auto ReturnSuccess = []() -> MaybeError { return {}; };
 
-        MaybeError result = ReturnSuccess();
-        ASSERT_TRUE(result.IsSuccess());
-    }
+    MaybeError result = ReturnSuccess();
+    ASSERT_TRUE(result.IsSuccess());
+}
 
-    // Check returning an error MaybeError with "return DAWN_VALIDATION_ERROR"
-    TEST(ErrorTests, Error_Error) {
-        auto ReturnError = []() -> MaybeError {
-            return DAWN_VALIDATION_ERROR(placeholderErrorMessage);
-        };
+// Check returning an error MaybeError with "return DAWN_VALIDATION_ERROR"
+TEST(ErrorTests, Error_Error) {
+    auto ReturnError = []() -> MaybeError {
+        return DAWN_VALIDATION_ERROR(placeholderErrorMessage);
+    };
 
-        MaybeError result = ReturnError();
-        ASSERT_TRUE(result.IsError());
+    MaybeError result = ReturnError();
+    ASSERT_TRUE(result.IsError());
 
-        std::unique_ptr<ErrorData> errorData = result.AcquireError();
-        ASSERT_EQ(errorData->GetMessage(), placeholderErrorMessage);
-    }
+    std::unique_ptr<ErrorData> errorData = result.AcquireError();
+    ASSERT_EQ(errorData->GetMessage(), placeholderErrorMessage);
+}
 
-    // Check returning a success ResultOrError with an implicit conversion
-    TEST(ErrorTests, ResultOrError_Success) {
-        auto ReturnSuccess = []() -> ResultOrError<int*> { return &placeholderSuccess; };
+// Check returning a success ResultOrError with an implicit conversion
+TEST(ErrorTests, ResultOrError_Success) {
+    auto ReturnSuccess = []() -> ResultOrError<int*> { return &placeholderSuccess; };
 
-        ResultOrError<int*> result = ReturnSuccess();
-        ASSERT_TRUE(result.IsSuccess());
-        ASSERT_EQ(result.AcquireSuccess(), &placeholderSuccess);
-    }
+    ResultOrError<int*> result = ReturnSuccess();
+    ASSERT_TRUE(result.IsSuccess());
+    ASSERT_EQ(result.AcquireSuccess(), &placeholderSuccess);
+}
 
-    // Check returning an error ResultOrError with "return DAWN_VALIDATION_ERROR"
-    TEST(ErrorTests, ResultOrError_Error) {
-        auto ReturnError = []() -> ResultOrError<int*> {
-            return DAWN_VALIDATION_ERROR(placeholderErrorMessage);
-        };
+// Check returning an error ResultOrError with "return DAWN_VALIDATION_ERROR"
+TEST(ErrorTests, ResultOrError_Error) {
+    auto ReturnError = []() -> ResultOrError<int*> {
+        return DAWN_VALIDATION_ERROR(placeholderErrorMessage);
+    };
 
-        ResultOrError<int*> result = ReturnError();
-        ASSERT_TRUE(result.IsError());
+    ResultOrError<int*> result = ReturnError();
+    ASSERT_TRUE(result.IsError());
 
-        std::unique_ptr<ErrorData> errorData = result.AcquireError();
-        ASSERT_EQ(errorData->GetMessage(), placeholderErrorMessage);
-    }
+    std::unique_ptr<ErrorData> errorData = result.AcquireError();
+    ASSERT_EQ(errorData->GetMessage(), placeholderErrorMessage);
+}
 
-    // Check DAWN_TRY handles successes correctly.
-    TEST(ErrorTests, TRY_Success) {
-        auto ReturnSuccess = []() -> MaybeError { return {}; };
+// Check DAWN_TRY handles successes correctly.
+TEST(ErrorTests, TRY_Success) {
+    auto ReturnSuccess = []() -> MaybeError { return {}; };
 
-        // We need to check that DAWN_TRY doesn't return on successes
-        bool tryReturned = true;
+    // We need to check that DAWN_TRY doesn't return on successes
+    bool tryReturned = true;
 
-        auto Try = [ReturnSuccess, &tryReturned]() -> MaybeError {
-            DAWN_TRY(ReturnSuccess());
-            tryReturned = false;
-            return {};
-        };
+    auto Try = [ReturnSuccess, &tryReturned]() -> MaybeError {
+        DAWN_TRY(ReturnSuccess());
+        tryReturned = false;
+        return {};
+    };
 
-        MaybeError result = Try();
-        ASSERT_TRUE(result.IsSuccess());
-        ASSERT_FALSE(tryReturned);
-    }
+    MaybeError result = Try();
+    ASSERT_TRUE(result.IsSuccess());
+    ASSERT_FALSE(tryReturned);
+}
 
-    // Check DAWN_TRY handles errors correctly.
-    TEST(ErrorTests, TRY_Error) {
-        auto ReturnError = []() -> MaybeError {
-            return DAWN_VALIDATION_ERROR(placeholderErrorMessage);
-        };
+// Check DAWN_TRY handles errors correctly.
+TEST(ErrorTests, TRY_Error) {
+    auto ReturnError = []() -> MaybeError {
+        return DAWN_VALIDATION_ERROR(placeholderErrorMessage);
+    };
 
-        auto Try = [ReturnError]() -> MaybeError {
-            DAWN_TRY(ReturnError());
-            // DAWN_TRY should return before this point
-            EXPECT_FALSE(true);
-            return {};
-        };
+    auto Try = [ReturnError]() -> MaybeError {
+        DAWN_TRY(ReturnError());
+        // DAWN_TRY should return before this point
+        EXPECT_FALSE(true);
+        return {};
+    };
 
-        MaybeError result = Try();
-        ASSERT_TRUE(result.IsError());
+    MaybeError result = Try();
+    ASSERT_TRUE(result.IsError());
 
-        std::unique_ptr<ErrorData> errorData = result.AcquireError();
-        ASSERT_EQ(errorData->GetMessage(), placeholderErrorMessage);
-    }
+    std::unique_ptr<ErrorData> errorData = result.AcquireError();
+    ASSERT_EQ(errorData->GetMessage(), placeholderErrorMessage);
+}
 
-    // Check DAWN_TRY adds to the backtrace.
-    TEST(ErrorTests, TRY_AddsToBacktrace) {
-        auto ReturnError = []() -> MaybeError {
-            return DAWN_VALIDATION_ERROR(placeholderErrorMessage);
-        };
+// Check DAWN_TRY adds to the backtrace.
+TEST(ErrorTests, TRY_AddsToBacktrace) {
+    auto ReturnError = []() -> MaybeError {
+        return DAWN_VALIDATION_ERROR(placeholderErrorMessage);
+    };
 
-        auto SingleTry = [ReturnError]() -> MaybeError {
-            DAWN_TRY(ReturnError());
-            return {};
-        };
+    auto SingleTry = [ReturnError]() -> MaybeError {
+        DAWN_TRY(ReturnError());
+        return {};
+    };
 
-        auto DoubleTry = [SingleTry]() -> MaybeError {
-            DAWN_TRY(SingleTry());
-            return {};
-        };
+    auto DoubleTry = [SingleTry]() -> MaybeError {
+        DAWN_TRY(SingleTry());
+        return {};
+    };
 
-        MaybeError singleResult = SingleTry();
-        ASSERT_TRUE(singleResult.IsError());
+    MaybeError singleResult = SingleTry();
+    ASSERT_TRUE(singleResult.IsError());
 
-        MaybeError doubleResult = DoubleTry();
-        ASSERT_TRUE(doubleResult.IsError());
+    MaybeError doubleResult = DoubleTry();
+    ASSERT_TRUE(doubleResult.IsError());
 
-        std::unique_ptr<ErrorData> singleData = singleResult.AcquireError();
-        std::unique_ptr<ErrorData> doubleData = doubleResult.AcquireError();
+    std::unique_ptr<ErrorData> singleData = singleResult.AcquireError();
+    std::unique_ptr<ErrorData> doubleData = doubleResult.AcquireError();
 
-        ASSERT_EQ(singleData->GetBacktrace().size() + 1, doubleData->GetBacktrace().size());
-    }
+    ASSERT_EQ(singleData->GetBacktrace().size() + 1, doubleData->GetBacktrace().size());
+}
 
-    // Check DAWN_TRY_ASSIGN handles successes correctly.
-    TEST(ErrorTests, TRY_RESULT_Success) {
-        auto ReturnSuccess = []() -> ResultOrError<int*> { return &placeholderSuccess; };
+// Check DAWN_TRY_ASSIGN handles successes correctly.
+TEST(ErrorTests, TRY_RESULT_Success) {
+    auto ReturnSuccess = []() -> ResultOrError<int*> { return &placeholderSuccess; };
 
-        // We need to check that DAWN_TRY doesn't return on successes
-        bool tryReturned = true;
+    // We need to check that DAWN_TRY doesn't return on successes
+    bool tryReturned = true;
 
-        auto Try = [ReturnSuccess, &tryReturned]() -> ResultOrError<int*> {
-            int* result = nullptr;
-            DAWN_TRY_ASSIGN(result, ReturnSuccess());
-            tryReturned = false;
+    auto Try = [ReturnSuccess, &tryReturned]() -> ResultOrError<int*> {
+        int* result = nullptr;
+        DAWN_TRY_ASSIGN(result, ReturnSuccess());
+        tryReturned = false;
 
-            EXPECT_EQ(result, &placeholderSuccess);
-            return result;
-        };
+        EXPECT_EQ(result, &placeholderSuccess);
+        return result;
+    };
 
-        ResultOrError<int*> result = Try();
-        ASSERT_TRUE(result.IsSuccess());
-        ASSERT_FALSE(tryReturned);
-        ASSERT_EQ(result.AcquireSuccess(), &placeholderSuccess);
-    }
+    ResultOrError<int*> result = Try();
+    ASSERT_TRUE(result.IsSuccess());
+    ASSERT_FALSE(tryReturned);
+    ASSERT_EQ(result.AcquireSuccess(), &placeholderSuccess);
+}
 
-    // Check DAWN_TRY_ASSIGN handles errors correctly.
-    TEST(ErrorTests, TRY_RESULT_Error) {
-        auto ReturnError = []() -> ResultOrError<int*> {
-            return DAWN_VALIDATION_ERROR(placeholderErrorMessage);
-        };
+// Check DAWN_TRY_ASSIGN handles errors correctly.
+TEST(ErrorTests, TRY_RESULT_Error) {
+    auto ReturnError = []() -> ResultOrError<int*> {
+        return DAWN_VALIDATION_ERROR(placeholderErrorMessage);
+    };
 
-        auto Try = [ReturnError]() -> ResultOrError<int*> {
-            int* result = nullptr;
-            DAWN_TRY_ASSIGN(result, ReturnError());
-            DAWN_UNUSED(result);
+    auto Try = [ReturnError]() -> ResultOrError<int*> {
+        int* result = nullptr;
+        DAWN_TRY_ASSIGN(result, ReturnError());
+        DAWN_UNUSED(result);
 
-            // DAWN_TRY should return before this point
-            EXPECT_FALSE(true);
-            return &placeholderSuccess;
-        };
+        // DAWN_TRY should return before this point
+        EXPECT_FALSE(true);
+        return &placeholderSuccess;
+    };
 
-        ResultOrError<int*> result = Try();
-        ASSERT_TRUE(result.IsError());
+    ResultOrError<int*> result = Try();
+    ASSERT_TRUE(result.IsError());
 
-        std::unique_ptr<ErrorData> errorData = result.AcquireError();
-        ASSERT_EQ(errorData->GetMessage(), placeholderErrorMessage);
-    }
+    std::unique_ptr<ErrorData> errorData = result.AcquireError();
+    ASSERT_EQ(errorData->GetMessage(), placeholderErrorMessage);
+}
 
-    // Check DAWN_TRY_ASSIGN adds to the backtrace.
-    TEST(ErrorTests, TRY_RESULT_AddsToBacktrace) {
-        auto ReturnError = []() -> ResultOrError<int*> {
-            return DAWN_VALIDATION_ERROR(placeholderErrorMessage);
-        };
+// Check DAWN_TRY_ASSIGN adds to the backtrace.
+TEST(ErrorTests, TRY_RESULT_AddsToBacktrace) {
+    auto ReturnError = []() -> ResultOrError<int*> {
+        return DAWN_VALIDATION_ERROR(placeholderErrorMessage);
+    };
 
-        auto SingleTry = [ReturnError]() -> ResultOrError<int*> {
-            DAWN_TRY(ReturnError());
-            return &placeholderSuccess;
-        };
+    auto SingleTry = [ReturnError]() -> ResultOrError<int*> {
+        DAWN_TRY(ReturnError());
+        return &placeholderSuccess;
+    };
 
-        auto DoubleTry = [SingleTry]() -> ResultOrError<int*> {
-            DAWN_TRY(SingleTry());
-            return &placeholderSuccess;
-        };
+    auto DoubleTry = [SingleTry]() -> ResultOrError<int*> {
+        DAWN_TRY(SingleTry());
+        return &placeholderSuccess;
+    };
 
-        ResultOrError<int*> singleResult = SingleTry();
-        ASSERT_TRUE(singleResult.IsError());
+    ResultOrError<int*> singleResult = SingleTry();
+    ASSERT_TRUE(singleResult.IsError());
 
-        ResultOrError<int*> doubleResult = DoubleTry();
-        ASSERT_TRUE(doubleResult.IsError());
+    ResultOrError<int*> doubleResult = DoubleTry();
+    ASSERT_TRUE(doubleResult.IsError());
 
-        std::unique_ptr<ErrorData> singleData = singleResult.AcquireError();
-        std::unique_ptr<ErrorData> doubleData = doubleResult.AcquireError();
+    std::unique_ptr<ErrorData> singleData = singleResult.AcquireError();
+    std::unique_ptr<ErrorData> doubleData = doubleResult.AcquireError();
 
-        ASSERT_EQ(singleData->GetBacktrace().size() + 1, doubleData->GetBacktrace().size());
-    }
+    ASSERT_EQ(singleData->GetBacktrace().size() + 1, doubleData->GetBacktrace().size());
+}
 
-    // Check a ResultOrError can be DAWN_TRY_ASSIGNED in a function that returns an Error
-    TEST(ErrorTests, TRY_RESULT_ConversionToError) {
-        auto ReturnError = []() -> ResultOrError<int*> {
-            return DAWN_VALIDATION_ERROR(placeholderErrorMessage);
-        };
+// Check a ResultOrError can be DAWN_TRY_ASSIGNED in a function that returns an Error
+TEST(ErrorTests, TRY_RESULT_ConversionToError) {
+    auto ReturnError = []() -> ResultOrError<int*> {
+        return DAWN_VALIDATION_ERROR(placeholderErrorMessage);
+    };
 
-        auto Try = [ReturnError]() -> MaybeError {
-            int* result = nullptr;
-            DAWN_TRY_ASSIGN(result, ReturnError());
-            DAWN_UNUSED(result);
+    auto Try = [ReturnError]() -> MaybeError {
+        int* result = nullptr;
+        DAWN_TRY_ASSIGN(result, ReturnError());
+        DAWN_UNUSED(result);
 
-            return {};
-        };
+        return {};
+    };
 
-        MaybeError result = Try();
-        ASSERT_TRUE(result.IsError());
+    MaybeError result = Try();
+    ASSERT_TRUE(result.IsError());
 
-        std::unique_ptr<ErrorData> errorData = result.AcquireError();
-        ASSERT_EQ(errorData->GetMessage(), placeholderErrorMessage);
-    }
+    std::unique_ptr<ErrorData> errorData = result.AcquireError();
+    ASSERT_EQ(errorData->GetMessage(), placeholderErrorMessage);
+}
 
-    // Check a ResultOrError can be DAWN_TRY_ASSIGNED in a function that returns an Error
-    // Version without Result<E*, T*>
-    TEST(ErrorTests, TRY_RESULT_ConversionToErrorNonPointer) {
-        auto ReturnError = []() -> ResultOrError<int> {
-            return DAWN_VALIDATION_ERROR(placeholderErrorMessage);
-        };
+// Check a ResultOrError can be DAWN_TRY_ASSIGNED in a function that returns an Error
+// Version without Result<E*, T*>
+TEST(ErrorTests, TRY_RESULT_ConversionToErrorNonPointer) {
+    auto ReturnError = []() -> ResultOrError<int> {
+        return DAWN_VALIDATION_ERROR(placeholderErrorMessage);
+    };
 
-        auto Try = [ReturnError]() -> MaybeError {
-            int result = 0;
-            DAWN_TRY_ASSIGN(result, ReturnError());
-            DAWN_UNUSED(result);
+    auto Try = [ReturnError]() -> MaybeError {
+        int result = 0;
+        DAWN_TRY_ASSIGN(result, ReturnError());
+        DAWN_UNUSED(result);
 
-            return {};
-        };
+        return {};
+    };
 
-        MaybeError result = Try();
-        ASSERT_TRUE(result.IsError());
+    MaybeError result = Try();
+    ASSERT_TRUE(result.IsError());
 
-        std::unique_ptr<ErrorData> errorData = result.AcquireError();
-        ASSERT_EQ(errorData->GetMessage(), placeholderErrorMessage);
-    }
+    std::unique_ptr<ErrorData> errorData = result.AcquireError();
+    ASSERT_EQ(errorData->GetMessage(), placeholderErrorMessage);
+}
 
-    // Check DAWN_TRY_ASSIGN handles successes correctly.
-    TEST(ErrorTests, TRY_RESULT_CLEANUP_Success) {
-        auto ReturnSuccess = []() -> ResultOrError<int*> { return &placeholderSuccess; };
+// Check DAWN_TRY_ASSIGN handles successes correctly.
+TEST(ErrorTests, TRY_RESULT_CLEANUP_Success) {
+    auto ReturnSuccess = []() -> ResultOrError<int*> { return &placeholderSuccess; };
 
-        // We need to check that DAWN_TRY_ASSIGN_WITH_CLEANUP doesn't return on successes and the
-        // cleanup is not called.
-        bool tryReturned = true;
-        bool tryCleanup = false;
+    // We need to check that DAWN_TRY_ASSIGN_WITH_CLEANUP doesn't return on successes and the
+    // cleanup is not called.
+    bool tryReturned = true;
+    bool tryCleanup = false;
 
-        auto Try = [ReturnSuccess, &tryReturned, &tryCleanup]() -> ResultOrError<int*> {
-            int* result = nullptr;
-            DAWN_TRY_ASSIGN_WITH_CLEANUP(result, ReturnSuccess(), { tryCleanup = true; });
-            tryReturned = false;
+    auto Try = [ReturnSuccess, &tryReturned, &tryCleanup]() -> ResultOrError<int*> {
+        int* result = nullptr;
+        DAWN_TRY_ASSIGN_WITH_CLEANUP(result, ReturnSuccess(), { tryCleanup = true; });
+        tryReturned = false;
 
-            EXPECT_EQ(result, &placeholderSuccess);
-            return result;
-        };
+        EXPECT_EQ(result, &placeholderSuccess);
+        return result;
+    };
 
-        ResultOrError<int*> result = Try();
-        ASSERT_TRUE(result.IsSuccess());
-        ASSERT_FALSE(tryReturned);
-        ASSERT_FALSE(tryCleanup);
-        ASSERT_EQ(result.AcquireSuccess(), &placeholderSuccess);
-    }
+    ResultOrError<int*> result = Try();
+    ASSERT_TRUE(result.IsSuccess());
+    ASSERT_FALSE(tryReturned);
+    ASSERT_FALSE(tryCleanup);
+    ASSERT_EQ(result.AcquireSuccess(), &placeholderSuccess);
+}
 
-    // Check DAWN_TRY_ASSIGN handles cleanups.
-    TEST(ErrorTests, TRY_RESULT_CLEANUP_Cleanup) {
-        auto ReturnError = []() -> ResultOrError<int*> {
-            return DAWN_VALIDATION_ERROR(placeholderErrorMessage);
-        };
+// Check DAWN_TRY_ASSIGN handles cleanups.
+TEST(ErrorTests, TRY_RESULT_CLEANUP_Cleanup) {
+    auto ReturnError = []() -> ResultOrError<int*> {
+        return DAWN_VALIDATION_ERROR(placeholderErrorMessage);
+    };
 
-        // We need to check that DAWN_TRY_ASSIGN_WITH_CLEANUP calls cleanup when error.
-        bool tryCleanup = false;
+    // We need to check that DAWN_TRY_ASSIGN_WITH_CLEANUP calls cleanup when error.
+    bool tryCleanup = false;
 
-        auto Try = [ReturnError, &tryCleanup]() -> ResultOrError<int*> {
-            int* result = nullptr;
-            DAWN_TRY_ASSIGN_WITH_CLEANUP(result, ReturnError(), { tryCleanup = true; });
-            DAWN_UNUSED(result);
+    auto Try = [ReturnError, &tryCleanup]() -> ResultOrError<int*> {
+        int* result = nullptr;
+        DAWN_TRY_ASSIGN_WITH_CLEANUP(result, ReturnError(), { tryCleanup = true; });
+        DAWN_UNUSED(result);
 
-            // DAWN_TRY_ASSIGN_WITH_CLEANUP should return before this point
-            EXPECT_FALSE(true);
-            return &placeholderSuccess;
-        };
+        // DAWN_TRY_ASSIGN_WITH_CLEANUP should return before this point
+        EXPECT_FALSE(true);
+        return &placeholderSuccess;
+    };
 
-        ResultOrError<int*> result = Try();
-        ASSERT_TRUE(result.IsError());
+    ResultOrError<int*> result = Try();
+    ASSERT_TRUE(result.IsError());
 
-        std::unique_ptr<ErrorData> errorData = result.AcquireError();
-        ASSERT_EQ(errorData->GetMessage(), placeholderErrorMessage);
-        ASSERT_TRUE(tryCleanup);
-    }
+    std::unique_ptr<ErrorData> errorData = result.AcquireError();
+    ASSERT_EQ(errorData->GetMessage(), placeholderErrorMessage);
+    ASSERT_TRUE(tryCleanup);
+}
 
-    // Check DAWN_TRY_ASSIGN can override return value when needed.
-    TEST(ErrorTests, TRY_RESULT_CLEANUP_OverrideReturn) {
-        auto ReturnError = []() -> ResultOrError<int*> {
-            return DAWN_VALIDATION_ERROR(placeholderErrorMessage);
-        };
+// Check DAWN_TRY_ASSIGN can override return value when needed.
+TEST(ErrorTests, TRY_RESULT_CLEANUP_OverrideReturn) {
+    auto ReturnError = []() -> ResultOrError<int*> {
+        return DAWN_VALIDATION_ERROR(placeholderErrorMessage);
+    };
 
-        auto Try = [ReturnError]() -> bool {
-            int* result = nullptr;
-            DAWN_TRY_ASSIGN_WITH_CLEANUP(result, ReturnError(), {}, true);
-            DAWN_UNUSED(result);
+    auto Try = [ReturnError]() -> bool {
+        int* result = nullptr;
+        DAWN_TRY_ASSIGN_WITH_CLEANUP(result, ReturnError(), {}, true);
+        DAWN_UNUSED(result);
 
-            // DAWN_TRY_ASSIGN_WITH_CLEANUP should return before this point
-            EXPECT_FALSE(true);
-            return false;
-        };
+        // DAWN_TRY_ASSIGN_WITH_CLEANUP should return before this point
+        EXPECT_FALSE(true);
+        return false;
+    };
 
-        bool result = Try();
-        ASSERT_TRUE(result);
-    }
+    bool result = Try();
+    ASSERT_TRUE(result);
+}
 
-    // Check a MaybeError can be DAWN_TRIED in a function that returns an ResultOrError
-    // Check DAWN_TRY handles errors correctly.
-    TEST(ErrorTests, TRY_ConversionToErrorOrResult) {
-        auto ReturnError = []() -> MaybeError {
-            return DAWN_VALIDATION_ERROR(placeholderErrorMessage);
-        };
+// Check a MaybeError can be DAWN_TRIED in a function that returns an ResultOrError
+// Check DAWN_TRY handles errors correctly.
+TEST(ErrorTests, TRY_ConversionToErrorOrResult) {
+    auto ReturnError = []() -> MaybeError {
+        return DAWN_VALIDATION_ERROR(placeholderErrorMessage);
+    };
 
-        auto Try = [ReturnError]() -> ResultOrError<int*> {
-            DAWN_TRY(ReturnError());
-            return &placeholderSuccess;
-        };
+    auto Try = [ReturnError]() -> ResultOrError<int*> {
+        DAWN_TRY(ReturnError());
+        return &placeholderSuccess;
+    };
 
-        ResultOrError<int*> result = Try();
-        ASSERT_TRUE(result.IsError());
+    ResultOrError<int*> result = Try();
+    ASSERT_TRUE(result.IsError());
 
-        std::unique_ptr<ErrorData> errorData = result.AcquireError();
-        ASSERT_EQ(errorData->GetMessage(), placeholderErrorMessage);
-    }
+    std::unique_ptr<ErrorData> errorData = result.AcquireError();
+    ASSERT_EQ(errorData->GetMessage(), placeholderErrorMessage);
+}
 
-    // Check a MaybeError can be DAWN_TRIED in a function that returns an ResultOrError
-    // Check DAWN_TRY handles errors correctly. Version without Result<E*, T*>
-    TEST(ErrorTests, TRY_ConversionToErrorOrResultNonPointer) {
-        auto ReturnError = []() -> MaybeError {
-            return DAWN_VALIDATION_ERROR(placeholderErrorMessage);
-        };
+// Check a MaybeError can be DAWN_TRIED in a function that returns an ResultOrError
+// Check DAWN_TRY handles errors correctly. Version without Result<E*, T*>
+TEST(ErrorTests, TRY_ConversionToErrorOrResultNonPointer) {
+    auto ReturnError = []() -> MaybeError {
+        return DAWN_VALIDATION_ERROR(placeholderErrorMessage);
+    };
 
-        auto Try = [ReturnError]() -> ResultOrError<int> {
-            DAWN_TRY(ReturnError());
-            return 42;
-        };
+    auto Try = [ReturnError]() -> ResultOrError<int> {
+        DAWN_TRY(ReturnError());
+        return 42;
+    };
 
-        ResultOrError<int> result = Try();
-        ASSERT_TRUE(result.IsError());
+    ResultOrError<int> result = Try();
+    ASSERT_TRUE(result.IsError());
 
-        std::unique_ptr<ErrorData> errorData = result.AcquireError();
-        ASSERT_EQ(errorData->GetMessage(), placeholderErrorMessage);
-    }
+    std::unique_ptr<ErrorData> errorData = result.AcquireError();
+    ASSERT_EQ(errorData->GetMessage(), placeholderErrorMessage);
+}
 
-    // TODO(https://crbug.com/dawn/1381) Remove when namespaces are not indented.
-    // NOLINTNEXTLINE(readability/namespace)
-}}  // namespace dawn::native::
+// TODO(https://crbug.com/dawn/1381) Remove when namespaces are not indented.
+// NOLINTNEXTLINE(readability/namespace)
+}  // namespace
+}  // namespace dawn::native
diff --git a/src/dawn/tests/unittests/FeatureTests.cpp b/src/dawn/tests/unittests/FeatureTests.cpp
index e263485..9fb6248 100644
--- a/src/dawn/tests/unittests/FeatureTests.cpp
+++ b/src/dawn/tests/unittests/FeatureTests.cpp
@@ -24,8 +24,7 @@
     FeatureTests()
         : testing::Test(),
           mInstanceBase(dawn::native::InstanceBase::Create()),
-          mAdapterBase(mInstanceBase.Get()) {
-    }
+          mAdapterBase(mInstanceBase.Get()) {}
 
     std::vector<wgpu::FeatureName> GetAllFeatureNames() {
         std::vector<wgpu::FeatureName> allFeatureNames(kTotalFeaturesCount);
diff --git a/src/dawn/tests/unittests/GPUInfoTests.cpp b/src/dawn/tests/unittests/GPUInfoTests.cpp
index 60e2190..42f12cb 100644
--- a/src/dawn/tests/unittests/GPUInfoTests.cpp
+++ b/src/dawn/tests/unittests/GPUInfoTests.cpp
@@ -17,11 +17,11 @@
 #include "dawn/common/GPUInfo.h"
 
 namespace {
-    const PCIVendorID vendorID = 0x8086;
-    const gpu_info::D3DDriverVersion version1 = {20, 19, 15, 5107};
-    const gpu_info::D3DDriverVersion version2 = {21, 20, 16, 5077};
-    const gpu_info::D3DDriverVersion version3 = {27, 20, 100, 9946};
-    const gpu_info::D3DDriverVersion version4 = {27, 20, 101, 2003};
+const PCIVendorID vendorID = 0x8086;
+const gpu_info::D3DDriverVersion version1 = {20, 19, 15, 5107};
+const gpu_info::D3DDriverVersion version2 = {21, 20, 16, 5077};
+const gpu_info::D3DDriverVersion version3 = {27, 20, 100, 9946};
+const gpu_info::D3DDriverVersion version4 = {27, 20, 101, 2003};
 }  // anonymous namespace
 
 TEST(GPUInfo, CompareD3DDriverVersion) {
diff --git a/src/dawn/tests/unittests/GetProcAddressTests.cpp b/src/dawn/tests/unittests/GetProcAddressTests.cpp
index 50840c2..a49c042 100644
--- a/src/dawn/tests/unittests/GetProcAddressTests.cpp
+++ b/src/dawn/tests/unittests/GetProcAddressTests.cpp
@@ -26,149 +26,148 @@
 
 namespace {
 
-    // dawn_wire and dawn_native contain duplicated code for the handling of GetProcAddress
-    // so we run the tests against both implementations. This enum is used as a test parameters to
-    // know which implementation to test.
-    enum class DawnFlavor {
-        Native,
-        Wire,
-    };
+// dawn_wire and dawn_native contain duplicated code for the handling of GetProcAddress
+// so we run the tests against both implementations. This enum is used as a test parameters to
+// know which implementation to test.
+enum class DawnFlavor {
+    Native,
+    Wire,
+};
 
-    std::ostream& operator<<(std::ostream& stream, DawnFlavor flavor) {
-        switch (flavor) {
-            case DawnFlavor::Native:
-                stream << "dawn_native";
-                break;
+std::ostream& operator<<(std::ostream& stream, DawnFlavor flavor) {
+    switch (flavor) {
+        case DawnFlavor::Native:
+            stream << "dawn_native";
+            break;
 
-            case DawnFlavor::Wire:
-                stream << "dawn_wire";
+        case DawnFlavor::Wire:
+            stream << "dawn_wire";
+            break;
+
+        default:
+            UNREACHABLE();
+            break;
+    }
+    return stream;
+}
+
+class GetProcAddressTests : public testing::TestWithParam<DawnFlavor> {
+  public:
+    GetProcAddressTests()
+        : testing::TestWithParam<DawnFlavor>(),
+          mNativeInstance(dawn::native::InstanceBase::Create()),
+          mNativeAdapter(mNativeInstance.Get()) {}
+
+    void SetUp() override {
+        switch (GetParam()) {
+            case DawnFlavor::Native: {
+                mDevice = wgpu::Device::Acquire(
+                    reinterpret_cast<WGPUDevice>(mNativeAdapter.APICreateDevice()));
+                mProcs = dawn::native::GetProcs();
                 break;
+            }
+
+            case DawnFlavor::Wire: {
+                mC2sBuf = std::make_unique<utils::TerribleCommandBuffer>();
+
+                dawn::wire::WireClientDescriptor clientDesc = {};
+                clientDesc.serializer = mC2sBuf.get();
+                mWireClient = std::make_unique<dawn::wire::WireClient>(clientDesc);
+
+                mDevice = wgpu::Device::Acquire(mWireClient->ReserveDevice().device);
+                mProcs = dawn::wire::client::GetProcs();
+                break;
+            }
 
             default:
                 UNREACHABLE();
                 break;
         }
-        return stream;
+
+        dawnProcSetProcs(&mProcs);
     }
 
-    class GetProcAddressTests : public testing::TestWithParam<DawnFlavor> {
-      public:
-        GetProcAddressTests()
-            : testing::TestWithParam<DawnFlavor>(),
-              mNativeInstance(dawn::native::InstanceBase::Create()),
-              mNativeAdapter(mNativeInstance.Get()) {
-        }
-
-        void SetUp() override {
-            switch (GetParam()) {
-                case DawnFlavor::Native: {
-                    mDevice = wgpu::Device::Acquire(
-                        reinterpret_cast<WGPUDevice>(mNativeAdapter.APICreateDevice()));
-                    mProcs = dawn::native::GetProcs();
-                    break;
-                }
-
-                case DawnFlavor::Wire: {
-                    mC2sBuf = std::make_unique<utils::TerribleCommandBuffer>();
-
-                    dawn::wire::WireClientDescriptor clientDesc = {};
-                    clientDesc.serializer = mC2sBuf.get();
-                    mWireClient = std::make_unique<dawn::wire::WireClient>(clientDesc);
-
-                    mDevice = wgpu::Device::Acquire(mWireClient->ReserveDevice().device);
-                    mProcs = dawn::wire::client::GetProcs();
-                    break;
-                }
-
-                default:
-                    UNREACHABLE();
-                    break;
-            }
-
-            dawnProcSetProcs(&mProcs);
-        }
-
-        void TearDown() override {
-            // Destroy the device before freeing the instance or the wire client in the destructor
-            mDevice = wgpu::Device();
-        }
-
-      protected:
-        Ref<dawn::native::InstanceBase> mNativeInstance;
-        dawn::native::null::Adapter mNativeAdapter;
-
-        std::unique_ptr<utils::TerribleCommandBuffer> mC2sBuf;
-        std::unique_ptr<dawn::wire::WireClient> mWireClient;
-
-        wgpu::Device mDevice;
-        DawnProcTable mProcs;
-    };
-
-    // Test GetProcAddress with and without devices on some valid examples
-    TEST_P(GetProcAddressTests, ValidExamples) {
-        ASSERT_EQ(mProcs.getProcAddress(nullptr, "wgpuDeviceCreateBuffer"),
-                  reinterpret_cast<WGPUProc>(mProcs.deviceCreateBuffer));
-        ASSERT_EQ(mProcs.getProcAddress(mDevice.Get(), "wgpuDeviceCreateBuffer"),
-                  reinterpret_cast<WGPUProc>(mProcs.deviceCreateBuffer));
-        ASSERT_EQ(mProcs.getProcAddress(nullptr, "wgpuQueueSubmit"),
-                  reinterpret_cast<WGPUProc>(mProcs.queueSubmit));
-        ASSERT_EQ(mProcs.getProcAddress(mDevice.Get(), "wgpuQueueSubmit"),
-                  reinterpret_cast<WGPUProc>(mProcs.queueSubmit));
+    void TearDown() override {
+        // Destroy the device before freeing the instance or the wire client in the destructor
+        mDevice = wgpu::Device();
     }
 
-    // Test GetProcAddress with and without devices on nullptr procName
-    TEST_P(GetProcAddressTests, Nullptr) {
-        ASSERT_EQ(mProcs.getProcAddress(nullptr, nullptr), nullptr);
-        ASSERT_EQ(mProcs.getProcAddress(mDevice.Get(), nullptr), nullptr);
+  protected:
+    Ref<dawn::native::InstanceBase> mNativeInstance;
+    dawn::native::null::Adapter mNativeAdapter;
+
+    std::unique_ptr<utils::TerribleCommandBuffer> mC2sBuf;
+    std::unique_ptr<dawn::wire::WireClient> mWireClient;
+
+    wgpu::Device mDevice;
+    DawnProcTable mProcs;
+};
+
+// Test GetProcAddress with and without devices on some valid examples
+TEST_P(GetProcAddressTests, ValidExamples) {
+    ASSERT_EQ(mProcs.getProcAddress(nullptr, "wgpuDeviceCreateBuffer"),
+              reinterpret_cast<WGPUProc>(mProcs.deviceCreateBuffer));
+    ASSERT_EQ(mProcs.getProcAddress(mDevice.Get(), "wgpuDeviceCreateBuffer"),
+              reinterpret_cast<WGPUProc>(mProcs.deviceCreateBuffer));
+    ASSERT_EQ(mProcs.getProcAddress(nullptr, "wgpuQueueSubmit"),
+              reinterpret_cast<WGPUProc>(mProcs.queueSubmit));
+    ASSERT_EQ(mProcs.getProcAddress(mDevice.Get(), "wgpuQueueSubmit"),
+              reinterpret_cast<WGPUProc>(mProcs.queueSubmit));
+}
+
+// Test GetProcAddress with and without devices on nullptr procName
+TEST_P(GetProcAddressTests, Nullptr) {
+    ASSERT_EQ(mProcs.getProcAddress(nullptr, nullptr), nullptr);
+    ASSERT_EQ(mProcs.getProcAddress(mDevice.Get(), nullptr), nullptr);
+}
+
+// Test GetProcAddress with and without devices on some invalid
+TEST_P(GetProcAddressTests, InvalidExamples) {
+    ASSERT_EQ(mProcs.getProcAddress(nullptr, "wgpuDeviceDoSomething"), nullptr);
+    ASSERT_EQ(mProcs.getProcAddress(mDevice.Get(), "wgpuDeviceDoSomething"), nullptr);
+
+    // Trigger the condition where lower_bound will return the end of the procMap.
+    ASSERT_EQ(mProcs.getProcAddress(nullptr, "zzzzzzz"), nullptr);
+    ASSERT_EQ(mProcs.getProcAddress(mDevice.Get(), "zzzzzzz"), nullptr);
+    ASSERT_EQ(mProcs.getProcAddress(nullptr, "ZZ"), nullptr);
+    ASSERT_EQ(mProcs.getProcAddress(mDevice.Get(), "ZZ"), nullptr);
+
+    // Some more potential corner cases.
+    ASSERT_EQ(mProcs.getProcAddress(nullptr, ""), nullptr);
+    ASSERT_EQ(mProcs.getProcAddress(mDevice.Get(), ""), nullptr);
+    ASSERT_EQ(mProcs.getProcAddress(nullptr, "0"), nullptr);
+    ASSERT_EQ(mProcs.getProcAddress(mDevice.Get(), "0"), nullptr);
+}
+
+// Test that GetProcAddress supports freestanding function that are handled specially
+TEST_P(GetProcAddressTests, FreeStandingFunctions) {
+    ASSERT_EQ(mProcs.getProcAddress(nullptr, "wgpuGetProcAddress"),
+              reinterpret_cast<WGPUProc>(mProcs.getProcAddress));
+    ASSERT_EQ(mProcs.getProcAddress(mDevice.Get(), "wgpuGetProcAddress"),
+              reinterpret_cast<WGPUProc>(mProcs.getProcAddress));
+
+    ASSERT_EQ(mProcs.getProcAddress(nullptr, "wgpuCreateInstance"),
+              reinterpret_cast<WGPUProc>(mProcs.createInstance));
+    ASSERT_EQ(mProcs.getProcAddress(mDevice.Get(), "wgpuCreateInstance"),
+              reinterpret_cast<WGPUProc>(mProcs.createInstance));
+}
+
+INSTANTIATE_TEST_SUITE_P(,
+                         GetProcAddressTests,
+                         testing::Values(DawnFlavor::Native, DawnFlavor::Wire),
+                         testing::PrintToStringParamName());
+
+TEST(GetProcAddressInternalTests, CheckDawnNativeProcMapOrder) {
+    std::vector<const char*> names = dawn::native::GetProcMapNamesForTesting();
+    for (size_t i = 1; i < names.size(); i++) {
+        ASSERT_LT(std::string(names[i - 1]), std::string(names[i]));
     }
+}
 
-    // Test GetProcAddress with and without devices on some invalid
-    TEST_P(GetProcAddressTests, InvalidExamples) {
-        ASSERT_EQ(mProcs.getProcAddress(nullptr, "wgpuDeviceDoSomething"), nullptr);
-        ASSERT_EQ(mProcs.getProcAddress(mDevice.Get(), "wgpuDeviceDoSomething"), nullptr);
-
-        // Trigger the condition where lower_bound will return the end of the procMap.
-        ASSERT_EQ(mProcs.getProcAddress(nullptr, "zzzzzzz"), nullptr);
-        ASSERT_EQ(mProcs.getProcAddress(mDevice.Get(), "zzzzzzz"), nullptr);
-        ASSERT_EQ(mProcs.getProcAddress(nullptr, "ZZ"), nullptr);
-        ASSERT_EQ(mProcs.getProcAddress(mDevice.Get(), "ZZ"), nullptr);
-
-        // Some more potential corner cases.
-        ASSERT_EQ(mProcs.getProcAddress(nullptr, ""), nullptr);
-        ASSERT_EQ(mProcs.getProcAddress(mDevice.Get(), ""), nullptr);
-        ASSERT_EQ(mProcs.getProcAddress(nullptr, "0"), nullptr);
-        ASSERT_EQ(mProcs.getProcAddress(mDevice.Get(), "0"), nullptr);
+TEST(GetProcAddressInternalTests, CheckDawnWireClientProcMapOrder) {
+    std::vector<const char*> names = dawn::wire::client::GetProcMapNamesForTesting();
+    for (size_t i = 1; i < names.size(); i++) {
+        ASSERT_LT(std::string(names[i - 1]), std::string(names[i]));
     }
-
-    // Test that GetProcAddress supports freestanding function that are handled specially
-    TEST_P(GetProcAddressTests, FreeStandingFunctions) {
-        ASSERT_EQ(mProcs.getProcAddress(nullptr, "wgpuGetProcAddress"),
-                  reinterpret_cast<WGPUProc>(mProcs.getProcAddress));
-        ASSERT_EQ(mProcs.getProcAddress(mDevice.Get(), "wgpuGetProcAddress"),
-                  reinterpret_cast<WGPUProc>(mProcs.getProcAddress));
-
-        ASSERT_EQ(mProcs.getProcAddress(nullptr, "wgpuCreateInstance"),
-                  reinterpret_cast<WGPUProc>(mProcs.createInstance));
-        ASSERT_EQ(mProcs.getProcAddress(mDevice.Get(), "wgpuCreateInstance"),
-                  reinterpret_cast<WGPUProc>(mProcs.createInstance));
-    }
-
-    INSTANTIATE_TEST_SUITE_P(,
-                             GetProcAddressTests,
-                             testing::Values(DawnFlavor::Native, DawnFlavor::Wire),
-                             testing::PrintToStringParamName());
-
-    TEST(GetProcAddressInternalTests, CheckDawnNativeProcMapOrder) {
-        std::vector<const char*> names = dawn::native::GetProcMapNamesForTesting();
-        for (size_t i = 1; i < names.size(); i++) {
-            ASSERT_LT(std::string(names[i - 1]), std::string(names[i]));
-        }
-    }
-
-    TEST(GetProcAddressInternalTests, CheckDawnWireClientProcMapOrder) {
-        std::vector<const char*> names = dawn::wire::client::GetProcMapNamesForTesting();
-        for (size_t i = 1; i < names.size(); i++) {
-            ASSERT_LT(std::string(names[i - 1]), std::string(names[i]));
-        }
-    }
+}
 }  // anonymous namespace
diff --git a/src/dawn/tests/unittests/LinkedListTests.cpp b/src/dawn/tests/unittests/LinkedListTests.cpp
index e63ef0a..1832159 100644
--- a/src/dawn/tests/unittests/LinkedListTests.cpp
+++ b/src/dawn/tests/unittests/LinkedListTests.cpp
@@ -12,16 +12,11 @@
 
 class Node : public LinkNode<Node> {
   public:
-    explicit Node(int id) : id_(id) {
-    }
+    explicit Node(int id) : id_(id) {}
 
-    int id() const {
-        return id_;
-    }
+    int id() const { return id_; }
 
-    void set_id(int id) {
-        id_ = id;
-    }
+    void set_id(int id) { id_ = id; }
 
   private:
     int id_;
@@ -29,8 +24,7 @@
 
 class MultipleInheritanceNodeBase {
   public:
-    MultipleInheritanceNodeBase() : field_taking_up_space_(0) {
-    }
+    MultipleInheritanceNodeBase() : field_taking_up_space_(0) {}
     int field_taking_up_space_;
 };
 
@@ -42,14 +36,11 @@
 
 class MovableNode : public LinkNode<MovableNode> {
   public:
-    explicit MovableNode(int id) : id_(id) {
-    }
+    explicit MovableNode(int id) : id_(id) {}
 
     MovableNode(MovableNode&&) = default;
 
-    int id() const {
-        return id_;
-    }
+    int id() const { return id_; }
 
   private:
     int id_;
diff --git a/src/dawn/tests/unittests/MathTests.cpp b/src/dawn/tests/unittests/MathTests.cpp
index a5ce0f4..d88e858 100644
--- a/src/dawn/tests/unittests/MathTests.cpp
+++ b/src/dawn/tests/unittests/MathTests.cpp
@@ -15,24 +15,24 @@
 #include <cmath>
 #include <vector>
 
-#include "gtest/gtest.h"
 #include "dawn/EnumClassBitmasks.h"
 #include "dawn/common/Math.h"
 #include "dawn/webgpu_cpp.h"
+#include "gtest/gtest.h"
 
 namespace wgpu {
-    enum class TestEnum {
-        A = 0x1,
-        B = 0x2,
-        C = 0x4,
-    };
+enum class TestEnum {
+    A = 0x1,
+    B = 0x2,
+    C = 0x4,
+};
 }  // namespace wgpu
 
 namespace dawn {
-    template <>
-    struct IsDawnBitmask<wgpu::TestEnum> {
-        static constexpr bool enable = true;
-    };
+template <>
+struct IsDawnBitmask<wgpu::TestEnum> {
+    static constexpr bool enable = true;
+};
 }  // namespace dawn
 
 // Tests for ScanForward
diff --git a/src/dawn/tests/unittests/PerStageTests.cpp b/src/dawn/tests/unittests/PerStageTests.cpp
index ccfdee4..4c39618 100644
--- a/src/dawn/tests/unittests/PerStageTests.cpp
+++ b/src/dawn/tests/unittests/PerStageTests.cpp
@@ -18,74 +18,74 @@
 
 namespace dawn::native {
 
-    // Tests for StageBit
-    TEST(PerStage, StageBit) {
-        ASSERT_EQ(StageBit(SingleShaderStage::Vertex), wgpu::ShaderStage::Vertex);
-        ASSERT_EQ(StageBit(SingleShaderStage::Fragment), wgpu::ShaderStage::Fragment);
-        ASSERT_EQ(StageBit(SingleShaderStage::Compute), wgpu::ShaderStage::Compute);
+// Tests for StageBit
+TEST(PerStage, StageBit) {
+    ASSERT_EQ(StageBit(SingleShaderStage::Vertex), wgpu::ShaderStage::Vertex);
+    ASSERT_EQ(StageBit(SingleShaderStage::Fragment), wgpu::ShaderStage::Fragment);
+    ASSERT_EQ(StageBit(SingleShaderStage::Compute), wgpu::ShaderStage::Compute);
+}
+
+// Basic test for the PerStage container
+TEST(PerStage, PerStage) {
+    PerStage<int> data;
+
+    // Store data using wgpu::ShaderStage
+    data[SingleShaderStage::Vertex] = 42;
+    data[SingleShaderStage::Fragment] = 3;
+    data[SingleShaderStage::Compute] = -1;
+
+    // Load it using wgpu::ShaderStage
+    ASSERT_EQ(data[wgpu::ShaderStage::Vertex], 42);
+    ASSERT_EQ(data[wgpu::ShaderStage::Fragment], 3);
+    ASSERT_EQ(data[wgpu::ShaderStage::Compute], -1);
+}
+
+// Test IterateStages with kAllStages
+TEST(PerStage, IterateAllStages) {
+    PerStage<int> counts;
+    counts[SingleShaderStage::Vertex] = 0;
+    counts[SingleShaderStage::Fragment] = 0;
+    counts[SingleShaderStage::Compute] = 0;
+
+    for (auto stage : IterateStages(kAllStages)) {
+        counts[stage]++;
     }
 
-    // Basic test for the PerStage container
-    TEST(PerStage, PerStage) {
-        PerStage<int> data;
+    ASSERT_EQ(counts[wgpu::ShaderStage::Vertex], 1);
+    ASSERT_EQ(counts[wgpu::ShaderStage::Fragment], 1);
+    ASSERT_EQ(counts[wgpu::ShaderStage::Compute], 1);
+}
 
-        // Store data using wgpu::ShaderStage
-        data[SingleShaderStage::Vertex] = 42;
-        data[SingleShaderStage::Fragment] = 3;
-        data[SingleShaderStage::Compute] = -1;
+// Test IterateStages with one stage
+TEST(PerStage, IterateOneStage) {
+    PerStage<int> counts;
+    counts[SingleShaderStage::Vertex] = 0;
+    counts[SingleShaderStage::Fragment] = 0;
+    counts[SingleShaderStage::Compute] = 0;
 
-        // Load it using wgpu::ShaderStage
-        ASSERT_EQ(data[wgpu::ShaderStage::Vertex], 42);
-        ASSERT_EQ(data[wgpu::ShaderStage::Fragment], 3);
-        ASSERT_EQ(data[wgpu::ShaderStage::Compute], -1);
+    for (auto stage : IterateStages(wgpu::ShaderStage::Fragment)) {
+        counts[stage]++;
     }
 
-    // Test IterateStages with kAllStages
-    TEST(PerStage, IterateAllStages) {
-        PerStage<int> counts;
-        counts[SingleShaderStage::Vertex] = 0;
-        counts[SingleShaderStage::Fragment] = 0;
-        counts[SingleShaderStage::Compute] = 0;
+    ASSERT_EQ(counts[wgpu::ShaderStage::Vertex], 0);
+    ASSERT_EQ(counts[wgpu::ShaderStage::Fragment], 1);
+    ASSERT_EQ(counts[wgpu::ShaderStage::Compute], 0);
+}
 
-        for (auto stage : IterateStages(kAllStages)) {
-            counts[stage]++;
-        }
+// Test IterateStages with no stage
+TEST(PerStage, IterateNoStages) {
+    PerStage<int> counts;
+    counts[SingleShaderStage::Vertex] = 0;
+    counts[SingleShaderStage::Fragment] = 0;
+    counts[SingleShaderStage::Compute] = 0;
 
-        ASSERT_EQ(counts[wgpu::ShaderStage::Vertex], 1);
-        ASSERT_EQ(counts[wgpu::ShaderStage::Fragment], 1);
-        ASSERT_EQ(counts[wgpu::ShaderStage::Compute], 1);
+    for (auto stage : IterateStages(wgpu::ShaderStage::Fragment & wgpu::ShaderStage::Vertex)) {
+        counts[stage]++;
     }
 
-    // Test IterateStages with one stage
-    TEST(PerStage, IterateOneStage) {
-        PerStage<int> counts;
-        counts[SingleShaderStage::Vertex] = 0;
-        counts[SingleShaderStage::Fragment] = 0;
-        counts[SingleShaderStage::Compute] = 0;
-
-        for (auto stage : IterateStages(wgpu::ShaderStage::Fragment)) {
-            counts[stage]++;
-        }
-
-        ASSERT_EQ(counts[wgpu::ShaderStage::Vertex], 0);
-        ASSERT_EQ(counts[wgpu::ShaderStage::Fragment], 1);
-        ASSERT_EQ(counts[wgpu::ShaderStage::Compute], 0);
-    }
-
-    // Test IterateStages with no stage
-    TEST(PerStage, IterateNoStages) {
-        PerStage<int> counts;
-        counts[SingleShaderStage::Vertex] = 0;
-        counts[SingleShaderStage::Fragment] = 0;
-        counts[SingleShaderStage::Compute] = 0;
-
-        for (auto stage : IterateStages(wgpu::ShaderStage::Fragment & wgpu::ShaderStage::Vertex)) {
-            counts[stage]++;
-        }
-
-        ASSERT_EQ(counts[wgpu::ShaderStage::Vertex], 0);
-        ASSERT_EQ(counts[wgpu::ShaderStage::Fragment], 0);
-        ASSERT_EQ(counts[wgpu::ShaderStage::Compute], 0);
-    }
+    ASSERT_EQ(counts[wgpu::ShaderStage::Vertex], 0);
+    ASSERT_EQ(counts[wgpu::ShaderStage::Fragment], 0);
+    ASSERT_EQ(counts[wgpu::ShaderStage::Compute], 0);
+}
 
 }  // namespace dawn::native
diff --git a/src/dawn/tests/unittests/PerThreadProcTests.cpp b/src/dawn/tests/unittests/PerThreadProcTests.cpp
index f275456..65d64d6 100644
--- a/src/dawn/tests/unittests/PerThreadProcTests.cpp
+++ b/src/dawn/tests/unittests/PerThreadProcTests.cpp
@@ -27,8 +27,7 @@
   public:
     PerThreadProcTests()
         : mNativeInstance(dawn::native::InstanceBase::Create()),
-          mNativeAdapter(mNativeInstance.Get()) {
-    }
+          mNativeAdapter(mNativeInstance.Get()) {}
     ~PerThreadProcTests() override = default;
 
   protected:
diff --git a/src/dawn/tests/unittests/PlacementAllocatedTests.cpp b/src/dawn/tests/unittests/PlacementAllocatedTests.cpp
index a031778..b800500 100644
--- a/src/dawn/tests/unittests/PlacementAllocatedTests.cpp
+++ b/src/dawn/tests/unittests/PlacementAllocatedTests.cpp
@@ -23,39 +23,31 @@
 
 namespace {
 
-    enum class DestructedClass {
-        Foo,
-        Bar,
-    };
+enum class DestructedClass {
+    Foo,
+    Bar,
+};
 
-    class MockDestructor {
-      public:
-        MOCK_METHOD(void, Call, (void*, DestructedClass));
-    };
+class MockDestructor {
+  public:
+    MOCK_METHOD(void, Call, (void*, DestructedClass));
+};
 
-    std::unique_ptr<StrictMock<MockDestructor>> mockDestructor;
+std::unique_ptr<StrictMock<MockDestructor>> mockDestructor;
 
-    class PlacementAllocatedTests : public testing::Test {
-        void SetUp() override {
-            mockDestructor = std::make_unique<StrictMock<MockDestructor>>();
-        }
+class PlacementAllocatedTests : public testing::Test {
+    void SetUp() override { mockDestructor = std::make_unique<StrictMock<MockDestructor>>(); }
 
-        void TearDown() override {
-            mockDestructor = nullptr;
-        }
-    };
+    void TearDown() override { mockDestructor = nullptr; }
+};
 
-    struct Foo : PlacementAllocated {
-        virtual ~Foo() {
-            mockDestructor->Call(this, DestructedClass::Foo);
-        }
-    };
+struct Foo : PlacementAllocated {
+    virtual ~Foo() { mockDestructor->Call(this, DestructedClass::Foo); }
+};
 
-    struct Bar : Foo {
-        ~Bar() override {
-            mockDestructor->Call(this, DestructedClass::Bar);
-        }
-    };
+struct Bar : Foo {
+    ~Bar() override { mockDestructor->Call(this, DestructedClass::Bar); }
+};
 }  // namespace
 
 // Test that deletion calls the destructor and does not free memory.
diff --git a/src/dawn/tests/unittests/RefBaseTests.cpp b/src/dawn/tests/unittests/RefBaseTests.cpp
index 0a34aca..dafdcf7 100644
--- a/src/dawn/tests/unittests/RefBaseTests.cpp
+++ b/src/dawn/tests/unittests/RefBaseTests.cpp
@@ -19,99 +19,85 @@
 #include "gmock/gmock.h"
 
 namespace {
-    using Id = uint32_t;
+using Id = uint32_t;
 
-    enum class Action {
-        kReference,
-        kRelease,
-        kAssign,
-        kMarker,
-    };
+enum class Action {
+    kReference,
+    kRelease,
+    kAssign,
+    kMarker,
+};
 
-    struct Event {
-        Action action;
-        Id thisId = 0;
-        Id otherId = 0;
-    };
+struct Event {
+    Action action;
+    Id thisId = 0;
+    Id otherId = 0;
+};
 
-    std::ostream& operator<<(std::ostream& os, const Event& event) {
-        switch (event.action) {
-            case Action::kReference:
-                os << "Reference " << event.thisId;
-                break;
-            case Action::kRelease:
-                os << "Release " << event.thisId;
-                break;
-            case Action::kAssign:
-                os << "Assign " << event.thisId << " <- " << event.otherId;
-                break;
-            case Action::kMarker:
-                os << "Marker " << event.thisId;
-                break;
+std::ostream& operator<<(std::ostream& os, const Event& event) {
+    switch (event.action) {
+        case Action::kReference:
+            os << "Reference " << event.thisId;
+            break;
+        case Action::kRelease:
+            os << "Release " << event.thisId;
+            break;
+        case Action::kAssign:
+            os << "Assign " << event.thisId << " <- " << event.otherId;
+            break;
+        case Action::kMarker:
+            os << "Marker " << event.thisId;
+            break;
+    }
+    return os;
+}
+
+bool operator==(const Event& a, const Event& b) {
+    return a.action == b.action && a.thisId == b.thisId && a.otherId == b.otherId;
+}
+
+using Events = std::vector<Event>;
+
+struct RefTracker {
+    explicit constexpr RefTracker(nullptr_t) : mId(0), mEvents(nullptr) {}
+
+    constexpr RefTracker(const RefTracker& other) = default;
+
+    RefTracker(Id id, Events* events) : mId(id), mEvents(events) {}
+
+    void Reference() const { mEvents->emplace_back(Event{Action::kReference, mId}); }
+
+    void Release() const { mEvents->emplace_back(Event{Action::kRelease, mId}); }
+
+    RefTracker& operator=(const RefTracker& other) {
+        if (mEvents || other.mEvents) {
+            Events* events = mEvents ? mEvents : other.mEvents;
+            events->emplace_back(Event{Action::kAssign, mId, other.mId});
         }
-        return os;
+        mId = other.mId;
+        mEvents = other.mEvents;
+        return *this;
     }
 
-    bool operator==(const Event& a, const Event& b) {
-        return a.action == b.action && a.thisId == b.thisId && a.otherId == b.otherId;
-    }
+    bool operator==(const RefTracker& other) const { return mId == other.mId; }
 
-    using Events = std::vector<Event>;
+    bool operator!=(const RefTracker& other) const { return mId != other.mId; }
 
-    struct RefTracker {
-        explicit constexpr RefTracker(nullptr_t) : mId(0), mEvents(nullptr) {
-        }
+    Id mId;
+    Events* mEvents;
+};
 
-        constexpr RefTracker(const RefTracker& other) = default;
+struct RefTrackerTraits {
+    static constexpr RefTracker kNullValue{nullptr};
 
-        RefTracker(Id id, Events* events) : mId(id), mEvents(events) {
-        }
+    static void Reference(const RefTracker& handle) { handle.Reference(); }
 
-        void Reference() const {
-            mEvents->emplace_back(Event{Action::kReference, mId});
-        }
+    static void Release(const RefTracker& handle) { handle.Release(); }
+};
 
-        void Release() const {
-            mEvents->emplace_back(Event{Action::kRelease, mId});
-        }
+constexpr RefTracker RefTrackerTraits::kNullValue;
 
-        RefTracker& operator=(const RefTracker& other) {
-            if (mEvents || other.mEvents) {
-                Events* events = mEvents ? mEvents : other.mEvents;
-                events->emplace_back(Event{Action::kAssign, mId, other.mId});
-            }
-            mId = other.mId;
-            mEvents = other.mEvents;
-            return *this;
-        }
-
-        bool operator==(const RefTracker& other) const {
-            return mId == other.mId;
-        }
-
-        bool operator!=(const RefTracker& other) const {
-            return mId != other.mId;
-        }
-
-        Id mId;
-        Events* mEvents;
-    };
-
-    struct RefTrackerTraits {
-        static constexpr RefTracker kNullValue{nullptr};
-
-        static void Reference(const RefTracker& handle) {
-            handle.Reference();
-        }
-
-        static void Release(const RefTracker& handle) {
-            handle.Release();
-        }
-    };
-
-    constexpr RefTracker RefTrackerTraits::kNullValue;
-
-    using Ref = RefBase<RefTracker, RefTrackerTraits>;
+using Ref = RefBase<RefTracker, RefTrackerTraits>;
 }  // namespace
 
 TEST(RefBase, Acquire) {
diff --git a/src/dawn/tests/unittests/RefCountedTests.cpp b/src/dawn/tests/unittests/RefCountedTests.cpp
index 503b701..a1c5fca 100644
--- a/src/dawn/tests/unittests/RefCountedTests.cpp
+++ b/src/dawn/tests/unittests/RefCountedTests.cpp
@@ -20,14 +20,11 @@
 
 class RCTest : public RefCounted {
   public:
-    RCTest() : RefCounted() {
-    }
+    RCTest() : RefCounted() {}
 
-    explicit RCTest(uint64_t payload) : RefCounted(payload) {
-    }
+    explicit RCTest(uint64_t payload) : RefCounted(payload) {}
 
-    explicit RCTest(bool* deleted) : mDeleted(deleted) {
-    }
+    explicit RCTest(bool* deleted) : mDeleted(deleted) {}
 
     ~RCTest() override {
         if (mDeleted != nullptr) {
@@ -35,9 +32,7 @@
         }
     }
 
-    RCTest* GetThis() {
-        return this;
-    }
+    RCTest* GetThis() { return this; }
 
   private:
     bool* mDeleted = nullptr;
diff --git a/src/dawn/tests/unittests/ResultTests.cpp b/src/dawn/tests/unittests/ResultTests.cpp
index ded13bf..a588631 100644
--- a/src/dawn/tests/unittests/ResultTests.cpp
+++ b/src/dawn/tests/unittests/ResultTests.cpp
@@ -22,369 +22,365 @@
 
 namespace {
 
-    template <typename T, typename E>
-    void TestError(Result<T, E>* result, E expectedError) {
-        EXPECT_TRUE(result->IsError());
-        EXPECT_FALSE(result->IsSuccess());
+template <typename T, typename E>
+void TestError(Result<T, E>* result, E expectedError) {
+    EXPECT_TRUE(result->IsError());
+    EXPECT_FALSE(result->IsSuccess());
 
-        std::unique_ptr<E> storedError = result->AcquireError();
-        EXPECT_EQ(*storedError, expectedError);
-    }
+    std::unique_ptr<E> storedError = result->AcquireError();
+    EXPECT_EQ(*storedError, expectedError);
+}
 
-    template <typename T, typename E>
-    void TestSuccess(Result<T, E>* result, T expectedSuccess) {
-        EXPECT_FALSE(result->IsError());
-        EXPECT_TRUE(result->IsSuccess());
+template <typename T, typename E>
+void TestSuccess(Result<T, E>* result, T expectedSuccess) {
+    EXPECT_FALSE(result->IsError());
+    EXPECT_TRUE(result->IsSuccess());
 
-        const T storedSuccess = result->AcquireSuccess();
-        EXPECT_EQ(storedSuccess, expectedSuccess);
+    const T storedSuccess = result->AcquireSuccess();
+    EXPECT_EQ(storedSuccess, expectedSuccess);
 
-        // Once the success is acquired, result has an empty
-        // payload and is neither in the success nor error state.
-        EXPECT_FALSE(result->IsError());
-        EXPECT_FALSE(result->IsSuccess());
-    }
+    // Once the success is acquired, result has an empty
+    // payload and is neither in the success nor error state.
+    EXPECT_FALSE(result->IsError());
+    EXPECT_FALSE(result->IsSuccess());
+}
 
-    static int placeholderError = 0xbeef;
-    static float placeholderSuccess = 42.0f;
-    static const float placeholderConstSuccess = 42.0f;
+static int placeholderError = 0xbeef;
+static float placeholderSuccess = 42.0f;
+static const float placeholderConstSuccess = 42.0f;
 
-    class AClass : public RefCounted {
-      public:
-        int a = 0;
+class AClass : public RefCounted {
+  public:
+    int a = 0;
+};
+
+// Tests using the following overload of TestSuccess make
+// local Ref instances to placeholderSuccessObj. Tests should
+// ensure any local Ref objects made along the way continue
+// to point to placeholderSuccessObj.
+template <typename T, typename E>
+void TestSuccess(Result<Ref<T>, E>* result, T* expectedSuccess) {
+    EXPECT_FALSE(result->IsError());
+    EXPECT_TRUE(result->IsSuccess());
+
+    // AClass starts with a reference count of 1 and stored
+    // on the stack in the caller. The result parameter should
+    // hold the only other reference to the object.
+    EXPECT_EQ(expectedSuccess->GetRefCountForTesting(), 2u);
+
+    const Ref<T> storedSuccess = result->AcquireSuccess();
+    EXPECT_EQ(storedSuccess.Get(), expectedSuccess);
+
+    // Once the success is acquired, result has an empty
+    // payload and is neither in the success nor error state.
+    EXPECT_FALSE(result->IsError());
+    EXPECT_FALSE(result->IsSuccess());
+
+    // Once we call AcquireSuccess, result no longer stores
+    // the object. storedSuccess should contain the only other
+    // reference to the object.
+    EXPECT_EQ(storedSuccess->GetRefCountForTesting(), 2u);
+}
+
+// Result<void, E*>
+
+// Test constructing an error Result<void, E>
+TEST(ResultOnlyPointerError, ConstructingError) {
+    Result<void, int> result(std::make_unique<int>(placeholderError));
+    TestError(&result, placeholderError);
+}
+
+// Test moving an error Result<void, E>
+TEST(ResultOnlyPointerError, MovingError) {
+    Result<void, int> result(std::make_unique<int>(placeholderError));
+    Result<void, int> movedResult(std::move(result));
+    TestError(&movedResult, placeholderError);
+}
+
+// Test returning an error Result<void, E>
+TEST(ResultOnlyPointerError, ReturningError) {
+    auto CreateError = []() -> Result<void, int> {
+        return {std::make_unique<int>(placeholderError)};
     };
 
-    // Tests using the following overload of TestSuccess make
-    // local Ref instances to placeholderSuccessObj. Tests should
-    // ensure any local Ref objects made along the way continue
-    // to point to placeholderSuccessObj.
-    template <typename T, typename E>
-    void TestSuccess(Result<Ref<T>, E>* result, T* expectedSuccess) {
-        EXPECT_FALSE(result->IsError());
-        EXPECT_TRUE(result->IsSuccess());
+    Result<void, int> result = CreateError();
+    TestError(&result, placeholderError);
+}
 
-        // AClass starts with a reference count of 1 and stored
-        // on the stack in the caller. The result parameter should
-        // hold the only other reference to the object.
-        EXPECT_EQ(expectedSuccess->GetRefCountForTesting(), 2u);
+// Test constructing a success Result<void, E>
+TEST(ResultOnlyPointerError, ConstructingSuccess) {
+    Result<void, int> result;
+    EXPECT_TRUE(result.IsSuccess());
+    EXPECT_FALSE(result.IsError());
+}
 
-        const Ref<T> storedSuccess = result->AcquireSuccess();
-        EXPECT_EQ(storedSuccess.Get(), expectedSuccess);
+// Test moving a success Result<void, E>
+TEST(ResultOnlyPointerError, MovingSuccess) {
+    Result<void, int> result;
+    Result<void, int> movedResult(std::move(result));
+    EXPECT_TRUE(movedResult.IsSuccess());
+    EXPECT_FALSE(movedResult.IsError());
+}
 
-        // Once the success is acquired, result has an empty
-        // payload and is neither in the success nor error state.
-        EXPECT_FALSE(result->IsError());
-        EXPECT_FALSE(result->IsSuccess());
+// Test returning a success Result<void, E>
+TEST(ResultOnlyPointerError, ReturningSuccess) {
+    auto CreateError = []() -> Result<void, int> { return {}; };
 
-        // Once we call AcquireSuccess, result no longer stores
-        // the object. storedSuccess should contain the only other
-        // reference to the object.
-        EXPECT_EQ(storedSuccess->GetRefCountForTesting(), 2u);
-    }
+    Result<void, int> result = CreateError();
+    EXPECT_TRUE(result.IsSuccess());
+    EXPECT_FALSE(result.IsError());
+}
 
-    // Result<void, E*>
+// Result<T*, E*>
 
-    // Test constructing an error Result<void, E>
-    TEST(ResultOnlyPointerError, ConstructingError) {
-        Result<void, int> result(std::make_unique<int>(placeholderError));
-        TestError(&result, placeholderError);
-    }
+// Test constructing an error Result<T*, E>
+TEST(ResultBothPointer, ConstructingError) {
+    Result<float*, int> result(std::make_unique<int>(placeholderError));
+    TestError(&result, placeholderError);
+}
 
-    // Test moving an error Result<void, E>
-    TEST(ResultOnlyPointerError, MovingError) {
-        Result<void, int> result(std::make_unique<int>(placeholderError));
-        Result<void, int> movedResult(std::move(result));
-        TestError(&movedResult, placeholderError);
-    }
+// Test moving an error Result<T*, E>
+TEST(ResultBothPointer, MovingError) {
+    Result<float*, int> result(std::make_unique<int>(placeholderError));
+    Result<float*, int> movedResult(std::move(result));
+    TestError(&movedResult, placeholderError);
+}
 
-    // Test returning an error Result<void, E>
-    TEST(ResultOnlyPointerError, ReturningError) {
-        auto CreateError = []() -> Result<void, int> {
-            return {std::make_unique<int>(placeholderError)};
-        };
-
-        Result<void, int> result = CreateError();
-        TestError(&result, placeholderError);
-    }
-
-    // Test constructing a success Result<void, E>
-    TEST(ResultOnlyPointerError, ConstructingSuccess) {
-        Result<void, int> result;
-        EXPECT_TRUE(result.IsSuccess());
-        EXPECT_FALSE(result.IsError());
-    }
-
-    // Test moving a success Result<void, E>
-    TEST(ResultOnlyPointerError, MovingSuccess) {
-        Result<void, int> result;
-        Result<void, int> movedResult(std::move(result));
-        EXPECT_TRUE(movedResult.IsSuccess());
-        EXPECT_FALSE(movedResult.IsError());
-    }
-
-    // Test returning a success Result<void, E>
-    TEST(ResultOnlyPointerError, ReturningSuccess) {
-        auto CreateError = []() -> Result<void, int> { return {}; };
-
-        Result<void, int> result = CreateError();
-        EXPECT_TRUE(result.IsSuccess());
-        EXPECT_FALSE(result.IsError());
-    }
-
-    // Result<T*, E*>
-
-    // Test constructing an error Result<T*, E>
-    TEST(ResultBothPointer, ConstructingError) {
-        Result<float*, int> result(std::make_unique<int>(placeholderError));
-        TestError(&result, placeholderError);
-    }
-
-    // Test moving an error Result<T*, E>
-    TEST(ResultBothPointer, MovingError) {
-        Result<float*, int> result(std::make_unique<int>(placeholderError));
-        Result<float*, int> movedResult(std::move(result));
-        TestError(&movedResult, placeholderError);
-    }
-
-    // Test returning an error Result<T*, E>
-    TEST(ResultBothPointer, ReturningError) {
-        auto CreateError = []() -> Result<float*, int> {
-            return {std::make_unique<int>(placeholderError)};
-        };
-
-        Result<float*, int> result = CreateError();
-        TestError(&result, placeholderError);
-    }
-
-    // Test constructing a success Result<T*, E>
-    TEST(ResultBothPointer, ConstructingSuccess) {
-        Result<float*, int> result(&placeholderSuccess);
-        TestSuccess(&result, &placeholderSuccess);
-    }
-
-    // Test moving a success Result<T*, E>
-    TEST(ResultBothPointer, MovingSuccess) {
-        Result<float*, int> result(&placeholderSuccess);
-        Result<float*, int> movedResult(std::move(result));
-        TestSuccess(&movedResult, &placeholderSuccess);
-    }
-
-    // Test returning a success Result<T*, E>
-    TEST(ResultBothPointer, ReturningSuccess) {
-        auto CreateSuccess = []() -> Result<float*, int*> { return {&placeholderSuccess}; };
-
-        Result<float*, int*> result = CreateSuccess();
-        TestSuccess(&result, &placeholderSuccess);
-    }
-
-    // Tests converting from a Result<TChild*, E>
-    TEST(ResultBothPointer, ConversionFromChildClass) {
-        struct T {
-            int a;
-        };
-        struct TChild : T {};
-
-        TChild child;
-        T* childAsT = &child;
-        {
-            Result<T*, int> result(&child);
-            TestSuccess(&result, childAsT);
-        }
-        {
-            Result<TChild*, int> resultChild(&child);
-            Result<T*, int> result(std::move(resultChild));
-            TestSuccess(&result, childAsT);
-        }
-        {
-            Result<TChild*, int> resultChild(&child);
-            Result<T*, int> result = std::move(resultChild);
-            TestSuccess(&result, childAsT);
-        }
-    }
-
-    // Result<const T*, E>
-
-    // Test constructing an error Result<const T*, E>
-    TEST(ResultBothPointerWithConstResult, ConstructingError) {
-        Result<const float*, int> result(std::make_unique<int>(placeholderError));
-        TestError(&result, placeholderError);
-    }
-
-    // Test moving an error Result<const T*, E>
-    TEST(ResultBothPointerWithConstResult, MovingError) {
-        Result<const float*, int> result(std::make_unique<int>(placeholderError));
-        Result<const float*, int> movedResult(std::move(result));
-        TestError(&movedResult, placeholderError);
-    }
-
-    // Test returning an error Result<const T*, E*>
-    TEST(ResultBothPointerWithConstResult, ReturningError) {
-        auto CreateError = []() -> Result<const float*, int> {
-            return {std::make_unique<int>(placeholderError)};
-        };
-
-        Result<const float*, int> result = CreateError();
-        TestError(&result, placeholderError);
-    }
-
-    // Test constructing a success Result<const T*, E*>
-    TEST(ResultBothPointerWithConstResult, ConstructingSuccess) {
-        Result<const float*, int> result(&placeholderConstSuccess);
-        TestSuccess(&result, &placeholderConstSuccess);
-    }
-
-    // Test moving a success Result<const T*, E*>
-    TEST(ResultBothPointerWithConstResult, MovingSuccess) {
-        Result<const float*, int> result(&placeholderConstSuccess);
-        Result<const float*, int> movedResult(std::move(result));
-        TestSuccess(&movedResult, &placeholderConstSuccess);
-    }
-
-    // Test returning a success Result<const T*, E*>
-    TEST(ResultBothPointerWithConstResult, ReturningSuccess) {
-        auto CreateSuccess = []() -> Result<const float*, int> {
-            return {&placeholderConstSuccess};
-        };
-
-        Result<const float*, int> result = CreateSuccess();
-        TestSuccess(&result, &placeholderConstSuccess);
-    }
-
-    // Result<Ref<T>, E>
-
-    // Test constructing an error Result<Ref<T>, E>
-    TEST(ResultRefT, ConstructingError) {
-        Result<Ref<AClass>, int> result(std::make_unique<int>(placeholderError));
-        TestError(&result, placeholderError);
-    }
-
-    // Test moving an error Result<Ref<T>, E>
-    TEST(ResultRefT, MovingError) {
-        Result<Ref<AClass>, int> result(std::make_unique<int>(placeholderError));
-        Result<Ref<AClass>, int> movedResult(std::move(result));
-        TestError(&movedResult, placeholderError);
-    }
-
-    // Test returning an error Result<Ref<T>, E>
-    TEST(ResultRefT, ReturningError) {
-        auto CreateError = []() -> Result<Ref<AClass>, int> {
-            return {std::make_unique<int>(placeholderError)};
-        };
-
-        Result<Ref<AClass>, int> result = CreateError();
-        TestError(&result, placeholderError);
-    }
-
-    // Test constructing a success Result<Ref<T>, E>
-    TEST(ResultRefT, ConstructingSuccess) {
-        AClass success;
-
-        Ref<AClass> refObj(&success);
-        Result<Ref<AClass>, int> result(std::move(refObj));
-        TestSuccess(&result, &success);
-    }
-
-    // Test moving a success Result<Ref<T>, E>
-    TEST(ResultRefT, MovingSuccess) {
-        AClass success;
-
-        Ref<AClass> refObj(&success);
-        Result<Ref<AClass>, int> result(std::move(refObj));
-        Result<Ref<AClass>, int> movedResult(std::move(result));
-        TestSuccess(&movedResult, &success);
-    }
-
-    // Test returning a success Result<Ref<T>, E>
-    TEST(ResultRefT, ReturningSuccess) {
-        AClass success;
-        auto CreateSuccess = [&success]() -> Result<Ref<AClass>, int> {
-            return Ref<AClass>(&success);
-        };
-
-        Result<Ref<AClass>, int> result = CreateSuccess();
-        TestSuccess(&result, &success);
-    }
-
-    class OtherClass {
-      public:
-        int a = 0;
+// Test returning an error Result<T*, E>
+TEST(ResultBothPointer, ReturningError) {
+    auto CreateError = []() -> Result<float*, int> {
+        return {std::make_unique<int>(placeholderError)};
     };
-    class Base : public RefCounted {};
-    class Child : public OtherClass, public Base {};
 
-    // Test constructing a Result<Ref<TChild>, E>
-    TEST(ResultRefT, ConversionFromChildConstructor) {
-        Child child;
-        Ref<Child> refChild(&child);
+    Result<float*, int> result = CreateError();
+    TestError(&result, placeholderError);
+}
 
-        Result<Ref<Base>, int> result(std::move(refChild));
-        TestSuccess<Base>(&result, &child);
+// Test constructing a success Result<T*, E>
+TEST(ResultBothPointer, ConstructingSuccess) {
+    Result<float*, int> result(&placeholderSuccess);
+    TestSuccess(&result, &placeholderSuccess);
+}
+
+// Test moving a success Result<T*, E>
+TEST(ResultBothPointer, MovingSuccess) {
+    Result<float*, int> result(&placeholderSuccess);
+    Result<float*, int> movedResult(std::move(result));
+    TestSuccess(&movedResult, &placeholderSuccess);
+}
+
+// Test returning a success Result<T*, E>
+TEST(ResultBothPointer, ReturningSuccess) {
+    auto CreateSuccess = []() -> Result<float*, int*> { return {&placeholderSuccess}; };
+
+    Result<float*, int*> result = CreateSuccess();
+    TestSuccess(&result, &placeholderSuccess);
+}
+
+// Tests converting from a Result<TChild*, E>
+TEST(ResultBothPointer, ConversionFromChildClass) {
+    struct T {
+        int a;
+    };
+    struct TChild : T {};
+
+    TChild child;
+    T* childAsT = &child;
+    {
+        Result<T*, int> result(&child);
+        TestSuccess(&result, childAsT);
     }
-
-    // Test copy constructing Result<Ref<TChild>, E>
-    TEST(ResultRefT, ConversionFromChildCopyConstructor) {
-        Child child;
-        Ref<Child> refChild(&child);
-
-        Result<Ref<Child>, int> resultChild(std::move(refChild));
-        Result<Ref<Base>, int> result(std::move(resultChild));
-        TestSuccess<Base>(&result, &child);
+    {
+        Result<TChild*, int> resultChild(&child);
+        Result<T*, int> result(std::move(resultChild));
+        TestSuccess(&result, childAsT);
     }
-
-    // Test assignment operator for Result<Ref<TChild>, E>
-    TEST(ResultRefT, ConversionFromChildAssignmentOperator) {
-        Child child;
-        Ref<Child> refChild(&child);
-
-        Result<Ref<Child>, int> resultChild(std::move(refChild));
-        Result<Ref<Base>, int> result = std::move(resultChild);
-        TestSuccess<Base>(&result, &child);
+    {
+        Result<TChild*, int> resultChild(&child);
+        Result<T*, int> result = std::move(resultChild);
+        TestSuccess(&result, childAsT);
     }
+}
 
-    // Result<T, E>
+// Result<const T*, E>
 
-    // Test constructing an error Result<T, E>
-    TEST(ResultGeneric, ConstructingError) {
-        Result<std::vector<float>, int> result(std::make_unique<int>(placeholderError));
-        TestError(&result, placeholderError);
-    }
+// Test constructing an error Result<const T*, E>
+TEST(ResultBothPointerWithConstResult, ConstructingError) {
+    Result<const float*, int> result(std::make_unique<int>(placeholderError));
+    TestError(&result, placeholderError);
+}
 
-    // Test moving an error Result<T, E>
-    TEST(ResultGeneric, MovingError) {
-        Result<std::vector<float>, int> result(std::make_unique<int>(placeholderError));
-        Result<std::vector<float>, int> movedResult(std::move(result));
-        TestError(&movedResult, placeholderError);
-    }
+// Test moving an error Result<const T*, E>
+TEST(ResultBothPointerWithConstResult, MovingError) {
+    Result<const float*, int> result(std::make_unique<int>(placeholderError));
+    Result<const float*, int> movedResult(std::move(result));
+    TestError(&movedResult, placeholderError);
+}
 
-    // Test returning an error Result<T, E>
-    TEST(ResultGeneric, ReturningError) {
-        auto CreateError = []() -> Result<std::vector<float>, int> {
-            return {std::make_unique<int>(placeholderError)};
-        };
+// Test returning an error Result<const T*, E*>
+TEST(ResultBothPointerWithConstResult, ReturningError) {
+    auto CreateError = []() -> Result<const float*, int> {
+        return {std::make_unique<int>(placeholderError)};
+    };
 
-        Result<std::vector<float>, int> result = CreateError();
-        TestError(&result, placeholderError);
-    }
+    Result<const float*, int> result = CreateError();
+    TestError(&result, placeholderError);
+}
 
-    // Test constructing a success Result<T, E>
-    TEST(ResultGeneric, ConstructingSuccess) {
-        Result<std::vector<float>, int> result({1.0f});
-        TestSuccess(&result, {1.0f});
-    }
+// Test constructing a success Result<const T*, E*>
+TEST(ResultBothPointerWithConstResult, ConstructingSuccess) {
+    Result<const float*, int> result(&placeholderConstSuccess);
+    TestSuccess(&result, &placeholderConstSuccess);
+}
 
-    // Test moving a success Result<T, E>
-    TEST(ResultGeneric, MovingSuccess) {
-        Result<std::vector<float>, int> result({1.0f});
-        Result<std::vector<float>, int> movedResult(std::move(result));
-        TestSuccess(&movedResult, {1.0f});
-    }
+// Test moving a success Result<const T*, E*>
+TEST(ResultBothPointerWithConstResult, MovingSuccess) {
+    Result<const float*, int> result(&placeholderConstSuccess);
+    Result<const float*, int> movedResult(std::move(result));
+    TestSuccess(&movedResult, &placeholderConstSuccess);
+}
 
-    // Test returning a success Result<T, E>
-    TEST(ResultGeneric, ReturningSuccess) {
-        auto CreateSuccess = []() -> Result<std::vector<float>, int> { return {{1.0f}}; };
+// Test returning a success Result<const T*, E*>
+TEST(ResultBothPointerWithConstResult, ReturningSuccess) {
+    auto CreateSuccess = []() -> Result<const float*, int> { return {&placeholderConstSuccess}; };
 
-        Result<std::vector<float>, int> result = CreateSuccess();
-        TestSuccess(&result, {1.0f});
-    }
+    Result<const float*, int> result = CreateSuccess();
+    TestSuccess(&result, &placeholderConstSuccess);
+}
+
+// Result<Ref<T>, E>
+
+// Test constructing an error Result<Ref<T>, E>
+TEST(ResultRefT, ConstructingError) {
+    Result<Ref<AClass>, int> result(std::make_unique<int>(placeholderError));
+    TestError(&result, placeholderError);
+}
+
+// Test moving an error Result<Ref<T>, E>
+TEST(ResultRefT, MovingError) {
+    Result<Ref<AClass>, int> result(std::make_unique<int>(placeholderError));
+    Result<Ref<AClass>, int> movedResult(std::move(result));
+    TestError(&movedResult, placeholderError);
+}
+
+// Test returning an error Result<Ref<T>, E>
+TEST(ResultRefT, ReturningError) {
+    auto CreateError = []() -> Result<Ref<AClass>, int> {
+        return {std::make_unique<int>(placeholderError)};
+    };
+
+    Result<Ref<AClass>, int> result = CreateError();
+    TestError(&result, placeholderError);
+}
+
+// Test constructing a success Result<Ref<T>, E>
+TEST(ResultRefT, ConstructingSuccess) {
+    AClass success;
+
+    Ref<AClass> refObj(&success);
+    Result<Ref<AClass>, int> result(std::move(refObj));
+    TestSuccess(&result, &success);
+}
+
+// Test moving a success Result<Ref<T>, E>
+TEST(ResultRefT, MovingSuccess) {
+    AClass success;
+
+    Ref<AClass> refObj(&success);
+    Result<Ref<AClass>, int> result(std::move(refObj));
+    Result<Ref<AClass>, int> movedResult(std::move(result));
+    TestSuccess(&movedResult, &success);
+}
+
+// Test returning a success Result<Ref<T>, E>
+TEST(ResultRefT, ReturningSuccess) {
+    AClass success;
+    auto CreateSuccess = [&success]() -> Result<Ref<AClass>, int> { return Ref<AClass>(&success); };
+
+    Result<Ref<AClass>, int> result = CreateSuccess();
+    TestSuccess(&result, &success);
+}
+
+class OtherClass {
+  public:
+    int a = 0;
+};
+class Base : public RefCounted {};
+class Child : public OtherClass, public Base {};
+
+// Test constructing a Result<Ref<TChild>, E>
+TEST(ResultRefT, ConversionFromChildConstructor) {
+    Child child;
+    Ref<Child> refChild(&child);
+
+    Result<Ref<Base>, int> result(std::move(refChild));
+    TestSuccess<Base>(&result, &child);
+}
+
+// Test copy constructing Result<Ref<TChild>, E>
+TEST(ResultRefT, ConversionFromChildCopyConstructor) {
+    Child child;
+    Ref<Child> refChild(&child);
+
+    Result<Ref<Child>, int> resultChild(std::move(refChild));
+    Result<Ref<Base>, int> result(std::move(resultChild));
+    TestSuccess<Base>(&result, &child);
+}
+
+// Test assignment operator for Result<Ref<TChild>, E>
+TEST(ResultRefT, ConversionFromChildAssignmentOperator) {
+    Child child;
+    Ref<Child> refChild(&child);
+
+    Result<Ref<Child>, int> resultChild(std::move(refChild));
+    Result<Ref<Base>, int> result = std::move(resultChild);
+    TestSuccess<Base>(&result, &child);
+}
+
+// Result<T, E>
+
+// Test constructing an error Result<T, E>
+TEST(ResultGeneric, ConstructingError) {
+    Result<std::vector<float>, int> result(std::make_unique<int>(placeholderError));
+    TestError(&result, placeholderError);
+}
+
+// Test moving an error Result<T, E>
+TEST(ResultGeneric, MovingError) {
+    Result<std::vector<float>, int> result(std::make_unique<int>(placeholderError));
+    Result<std::vector<float>, int> movedResult(std::move(result));
+    TestError(&movedResult, placeholderError);
+}
+
+// Test returning an error Result<T, E>
+TEST(ResultGeneric, ReturningError) {
+    auto CreateError = []() -> Result<std::vector<float>, int> {
+        return {std::make_unique<int>(placeholderError)};
+    };
+
+    Result<std::vector<float>, int> result = CreateError();
+    TestError(&result, placeholderError);
+}
+
+// Test constructing a success Result<T, E>
+TEST(ResultGeneric, ConstructingSuccess) {
+    Result<std::vector<float>, int> result({1.0f});
+    TestSuccess(&result, {1.0f});
+}
+
+// Test moving a success Result<T, E>
+TEST(ResultGeneric, MovingSuccess) {
+    Result<std::vector<float>, int> result({1.0f});
+    Result<std::vector<float>, int> movedResult(std::move(result));
+    TestSuccess(&movedResult, {1.0f});
+}
+
+// Test returning a success Result<T, E>
+TEST(ResultGeneric, ReturningSuccess) {
+    auto CreateSuccess = []() -> Result<std::vector<float>, int> { return {{1.0f}}; };
+
+    Result<std::vector<float>, int> result = CreateSuccess();
+    TestSuccess(&result, {1.0f});
+}
 
 }  // anonymous namespace
diff --git a/src/dawn/tests/unittests/RingBufferAllocatorTests.cpp b/src/dawn/tests/unittests/RingBufferAllocatorTests.cpp
index b2a4f10..da6a738 100644
--- a/src/dawn/tests/unittests/RingBufferAllocatorTests.cpp
+++ b/src/dawn/tests/unittests/RingBufferAllocatorTests.cpp
@@ -19,161 +19,159 @@
 
 namespace dawn::native {
 
-    constexpr uint64_t RingBufferAllocator::kInvalidOffset;
+constexpr uint64_t RingBufferAllocator::kInvalidOffset;
 
-    // Number of basic tests for Ringbuffer
-    TEST(RingBufferAllocatorTests, BasicTest) {
-        constexpr uint64_t sizeInBytes = 64000;
-        RingBufferAllocator allocator(sizeInBytes);
+// Number of basic tests for Ringbuffer
+TEST(RingBufferAllocatorTests, BasicTest) {
+    constexpr uint64_t sizeInBytes = 64000;
+    RingBufferAllocator allocator(sizeInBytes);
 
-        // Ensure no requests exist on empty buffer.
-        EXPECT_TRUE(allocator.Empty());
+    // Ensure no requests exist on empty buffer.
+    EXPECT_TRUE(allocator.Empty());
 
-        ASSERT_EQ(allocator.GetSize(), sizeInBytes);
+    ASSERT_EQ(allocator.GetSize(), sizeInBytes);
 
-        // Ensure failure upon sub-allocating an oversized request.
-        ASSERT_EQ(allocator.Allocate(sizeInBytes + 1, ExecutionSerial(0)),
-                  RingBufferAllocator::kInvalidOffset);
+    // Ensure failure upon sub-allocating an oversized request.
+    ASSERT_EQ(allocator.Allocate(sizeInBytes + 1, ExecutionSerial(0)),
+              RingBufferAllocator::kInvalidOffset);
 
-        // Fill the entire buffer with two requests of equal size.
-        ASSERT_EQ(allocator.Allocate(sizeInBytes / 2, ExecutionSerial(1)), 0u);
-        ASSERT_EQ(allocator.Allocate(sizeInBytes / 2, ExecutionSerial(2)), 32000u);
+    // Fill the entire buffer with two requests of equal size.
+    ASSERT_EQ(allocator.Allocate(sizeInBytes / 2, ExecutionSerial(1)), 0u);
+    ASSERT_EQ(allocator.Allocate(sizeInBytes / 2, ExecutionSerial(2)), 32000u);
 
-        // Ensure the buffer is full.
-        ASSERT_EQ(allocator.Allocate(1, ExecutionSerial(3)), RingBufferAllocator::kInvalidOffset);
+    // Ensure the buffer is full.
+    ASSERT_EQ(allocator.Allocate(1, ExecutionSerial(3)), RingBufferAllocator::kInvalidOffset);
+}
+
+// Tests that several ringbuffer allocations do not fail.
+TEST(RingBufferAllocatorTests, RingBufferManyAlloc) {
+    constexpr uint64_t maxNumOfFrames = 64000;
+    constexpr uint64_t frameSizeInBytes = 4;
+
+    RingBufferAllocator allocator(maxNumOfFrames * frameSizeInBytes);
+
+    size_t offset = 0;
+    for (ExecutionSerial i(0); i < ExecutionSerial(maxNumOfFrames); ++i) {
+        offset = allocator.Allocate(frameSizeInBytes, i);
+        ASSERT_EQ(offset, uint64_t(i) * frameSizeInBytes);
     }
+}
 
-    // Tests that several ringbuffer allocations do not fail.
-    TEST(RingBufferAllocatorTests, RingBufferManyAlloc) {
-        constexpr uint64_t maxNumOfFrames = 64000;
-        constexpr uint64_t frameSizeInBytes = 4;
+// Tests ringbuffer sub-allocations of the same serial are correctly tracked.
+TEST(RingBufferAllocatorTests, AllocInSameFrame) {
+    constexpr uint64_t maxNumOfFrames = 3;
+    constexpr uint64_t frameSizeInBytes = 4;
 
-        RingBufferAllocator allocator(maxNumOfFrames * frameSizeInBytes);
+    RingBufferAllocator allocator(maxNumOfFrames * frameSizeInBytes);
 
-        size_t offset = 0;
-        for (ExecutionSerial i(0); i < ExecutionSerial(maxNumOfFrames); ++i) {
-            offset = allocator.Allocate(frameSizeInBytes, i);
-            ASSERT_EQ(offset, uint64_t(i) * frameSizeInBytes);
-        }
-    }
+    //    F1
+    //  [xxxx|--------]
+    size_t offset = allocator.Allocate(frameSizeInBytes, ExecutionSerial(1));
 
-    // Tests ringbuffer sub-allocations of the same serial are correctly tracked.
-    TEST(RingBufferAllocatorTests, AllocInSameFrame) {
-        constexpr uint64_t maxNumOfFrames = 3;
-        constexpr uint64_t frameSizeInBytes = 4;
+    //    F1   F2
+    //  [xxxx|xxxx|----]
 
-        RingBufferAllocator allocator(maxNumOfFrames * frameSizeInBytes);
+    offset = allocator.Allocate(frameSizeInBytes, ExecutionSerial(2));
 
-        //    F1
-        //  [xxxx|--------]
-        size_t offset = allocator.Allocate(frameSizeInBytes, ExecutionSerial(1));
+    //    F1     F2
+    //  [xxxx|xxxxxxxx]
 
-        //    F1   F2
-        //  [xxxx|xxxx|----]
+    offset = allocator.Allocate(frameSizeInBytes, ExecutionSerial(2));
 
-        offset = allocator.Allocate(frameSizeInBytes, ExecutionSerial(2));
+    ASSERT_EQ(offset, 8u);
+    ASSERT_EQ(allocator.GetUsedSize(), frameSizeInBytes * 3);
 
-        //    F1     F2
-        //  [xxxx|xxxxxxxx]
+    allocator.Deallocate(ExecutionSerial(2));
 
-        offset = allocator.Allocate(frameSizeInBytes, ExecutionSerial(2));
+    ASSERT_EQ(allocator.GetUsedSize(), 0u);
+    EXPECT_TRUE(allocator.Empty());
+}
 
-        ASSERT_EQ(offset, 8u);
-        ASSERT_EQ(allocator.GetUsedSize(), frameSizeInBytes * 3);
+// Tests ringbuffer sub-allocation at various offsets.
+TEST(RingBufferAllocatorTests, RingBufferSubAlloc) {
+    constexpr uint64_t maxNumOfFrames = 10;
+    constexpr uint64_t frameSizeInBytes = 4;
 
-        allocator.Deallocate(ExecutionSerial(2));
+    RingBufferAllocator allocator(maxNumOfFrames * frameSizeInBytes);
 
-        ASSERT_EQ(allocator.GetUsedSize(), 0u);
-        EXPECT_TRUE(allocator.Empty());
-    }
-
-    // Tests ringbuffer sub-allocation at various offsets.
-    TEST(RingBufferAllocatorTests, RingBufferSubAlloc) {
-        constexpr uint64_t maxNumOfFrames = 10;
-        constexpr uint64_t frameSizeInBytes = 4;
-
-        RingBufferAllocator allocator(maxNumOfFrames * frameSizeInBytes);
-
-        // Sub-alloc the first eight frames.
-        ExecutionSerial serial(0);
-        while (serial < ExecutionSerial(8)) {
-            allocator.Allocate(frameSizeInBytes, serial);
-            serial++;
-        }
-
-        // Each frame corrresponds to the serial number (for simplicity).
-        //
-        //    F1   F2   F3   F4   F5   F6   F7   F8
-        //  [xxxx|xxxx|xxxx|xxxx|xxxx|xxxx|xxxx|xxxx|--------]
-        //
-
-        // Ensure an oversized allocation fails (only 8 bytes left)
-        ASSERT_EQ(allocator.Allocate(frameSizeInBytes * 3, serial),
-                  RingBufferAllocator::kInvalidOffset);
-        ASSERT_EQ(allocator.GetUsedSize(), frameSizeInBytes * 8);
-
-        // Reclaim the first 3 frames.
-        allocator.Deallocate(ExecutionSerial(2));
-
-        //                 F4   F5   F6   F7   F8
-        //  [------------|xxxx|xxxx|xxxx|xxxx|xxxx|--------]
-        //
-        ASSERT_EQ(allocator.GetUsedSize(), frameSizeInBytes * 5);
-
-        // Re-try the over-sized allocation.
-        size_t offset = allocator.Allocate(frameSizeInBytes * 3, ExecutionSerial(serial));
-
-        //        F9       F4   F5   F6   F7   F8
-        //  [xxxxxxxxxxxx|xxxx|xxxx|xxxx|xxxx|xxxx|xxxxxxxx]
-        //                                         ^^^^^^^^ wasted
-
-        // In this example, Deallocate(8) could not reclaim the wasted bytes. The wasted bytes
-        // were added to F9's sub-allocation.
-        // TODO(bryan.bernhart@intel.com): Decide if Deallocate(8) should free these wasted bytes.
-
-        ASSERT_EQ(offset, 0u);
-        ASSERT_EQ(allocator.GetUsedSize(), frameSizeInBytes * maxNumOfFrames);
-
-        // Ensure we are full.
-        ASSERT_EQ(allocator.Allocate(frameSizeInBytes, serial),
-                  RingBufferAllocator::kInvalidOffset);
-
-        // Reclaim the next two frames.
-        allocator.Deallocate(ExecutionSerial(4));
-
-        //        F9       F4   F5   F6   F7   F8
-        //  [xxxxxxxxxxxx|----|----|xxxx|xxxx|xxxx|xxxxxxxx]
-        //
-        ASSERT_EQ(allocator.GetUsedSize(), frameSizeInBytes * 8);
-
-        // Sub-alloc the chunk in the middle.
+    // Sub-alloc the first eight frames.
+    ExecutionSerial serial(0);
+    while (serial < ExecutionSerial(8)) {
+        allocator.Allocate(frameSizeInBytes, serial);
         serial++;
-        offset = allocator.Allocate(frameSizeInBytes * 2, serial);
-
-        ASSERT_EQ(offset, frameSizeInBytes * 3);
-        ASSERT_EQ(allocator.GetUsedSize(), frameSizeInBytes * maxNumOfFrames);
-
-        //        F9         F10      F6   F7   F8
-        //  [xxxxxxxxxxxx|xxxxxxxxx|xxxx|xxxx|xxxx|xxxxxxxx]
-        //
-
-        // Ensure we are full.
-        ASSERT_EQ(allocator.Allocate(frameSizeInBytes, serial),
-                  RingBufferAllocator::kInvalidOffset);
-
-        // Reclaim all.
-        allocator.Deallocate(kMaxExecutionSerial);
-
-        EXPECT_TRUE(allocator.Empty());
     }
 
-    // Checks if ringbuffer sub-allocation does not overflow.
-    TEST(RingBufferAllocatorTests, RingBufferOverflow) {
-        RingBufferAllocator allocator(std::numeric_limits<uint64_t>::max());
+    // Each frame corresponds to the serial number (for simplicity).
+    //
+    //    F1   F2   F3   F4   F5   F6   F7   F8
+    //  [xxxx|xxxx|xxxx|xxxx|xxxx|xxxx|xxxx|xxxx|--------]
+    //
 
-        ASSERT_EQ(allocator.Allocate(1, ExecutionSerial(1)), 0u);
-        ASSERT_EQ(allocator.Allocate(std::numeric_limits<uint64_t>::max(), ExecutionSerial(1)),
-                  RingBufferAllocator::kInvalidOffset);
-    }
+    // Ensure an oversized allocation fails (only 8 bytes left)
+    ASSERT_EQ(allocator.Allocate(frameSizeInBytes * 3, serial),
+              RingBufferAllocator::kInvalidOffset);
+    ASSERT_EQ(allocator.GetUsedSize(), frameSizeInBytes * 8);
+
+    // Reclaim the first 3 frames.
+    allocator.Deallocate(ExecutionSerial(2));
+
+    //                 F4   F5   F6   F7   F8
+    //  [------------|xxxx|xxxx|xxxx|xxxx|xxxx|--------]
+    //
+    ASSERT_EQ(allocator.GetUsedSize(), frameSizeInBytes * 5);
+
+    // Re-try the over-sized allocation.
+    size_t offset = allocator.Allocate(frameSizeInBytes * 3, ExecutionSerial(serial));
+
+    //        F9       F4   F5   F6   F7   F8
+    //  [xxxxxxxxxxxx|xxxx|xxxx|xxxx|xxxx|xxxx|xxxxxxxx]
+    //                                         ^^^^^^^^ wasted
+
+    // In this example, Deallocate(8) could not reclaim the wasted bytes. The wasted bytes
+    // were added to F9's sub-allocation.
+    // TODO(bryan.bernhart@intel.com): Decide if Deallocate(8) should free these wasted bytes.
+
+    ASSERT_EQ(offset, 0u);
+    ASSERT_EQ(allocator.GetUsedSize(), frameSizeInBytes * maxNumOfFrames);
+
+    // Ensure we are full.
+    ASSERT_EQ(allocator.Allocate(frameSizeInBytes, serial), RingBufferAllocator::kInvalidOffset);
+
+    // Reclaim the next two frames.
+    allocator.Deallocate(ExecutionSerial(4));
+
+    //        F9       F4   F5   F6   F7   F8
+    //  [xxxxxxxxxxxx|----|----|xxxx|xxxx|xxxx|xxxxxxxx]
+    //
+    ASSERT_EQ(allocator.GetUsedSize(), frameSizeInBytes * 8);
+
+    // Sub-alloc the chunk in the middle.
+    serial++;
+    offset = allocator.Allocate(frameSizeInBytes * 2, serial);
+
+    ASSERT_EQ(offset, frameSizeInBytes * 3);
+    ASSERT_EQ(allocator.GetUsedSize(), frameSizeInBytes * maxNumOfFrames);
+
+    //        F9         F10      F6   F7   F8
+    //  [xxxxxxxxxxxx|xxxxxxxxx|xxxx|xxxx|xxxx|xxxxxxxx]
+    //
+
+    // Ensure we are full.
+    ASSERT_EQ(allocator.Allocate(frameSizeInBytes, serial), RingBufferAllocator::kInvalidOffset);
+
+    // Reclaim all.
+    allocator.Deallocate(kMaxExecutionSerial);
+
+    EXPECT_TRUE(allocator.Empty());
+}
+
+// Checks if ringbuffer sub-allocation does not overflow.
+TEST(RingBufferAllocatorTests, RingBufferOverflow) {
+    RingBufferAllocator allocator(std::numeric_limits<uint64_t>::max());
+
+    ASSERT_EQ(allocator.Allocate(1, ExecutionSerial(1)), 0u);
+    ASSERT_EQ(allocator.Allocate(std::numeric_limits<uint64_t>::max(), ExecutionSerial(1)),
+              RingBufferAllocator::kInvalidOffset);
+}
 
 }  // namespace dawn::native
diff --git a/src/dawn/tests/unittests/SlabAllocatorTests.cpp b/src/dawn/tests/unittests/SlabAllocatorTests.cpp
index fa16348..33f2b16 100644
--- a/src/dawn/tests/unittests/SlabAllocatorTests.cpp
+++ b/src/dawn/tests/unittests/SlabAllocatorTests.cpp
@@ -21,16 +21,15 @@
 
 namespace {
 
-    struct Foo : public PlacementAllocated {
-        explicit Foo(int value) : value(value) {
-        }
+struct Foo : public PlacementAllocated {
+    explicit Foo(int value) : value(value) {}
 
-        int value;
-    };
+    int value;
+};
 
-    struct alignas(256) AlignedFoo : public Foo {
-        using Foo::Foo;
-    };
+struct alignas(256) AlignedFoo : public Foo {
+    using Foo::Foo;
+};
 
 }  // namespace
 
diff --git a/src/dawn/tests/unittests/StackContainerTests.cpp b/src/dawn/tests/unittests/StackContainerTests.cpp
index 1143ca8..4325332 100644
--- a/src/dawn/tests/unittests/StackContainerTests.cpp
+++ b/src/dawn/tests/unittests/StackContainerTests.cpp
@@ -8,25 +8,21 @@
 #include <cstddef>
 #include <vector>
 
-#include "gtest/gtest.h"
 #include "dawn/common/RefCounted.h"
 #include "dawn/common/StackContainer.h"
+#include "gtest/gtest.h"
 
 namespace {
 
-    class Placeholder : public RefCounted {
-      public:
-        explicit Placeholder(int* alive) : mAlive(alive) {
-            ++*mAlive;
-        }
+class Placeholder : public RefCounted {
+  public:
+    explicit Placeholder(int* alive) : mAlive(alive) { ++*mAlive; }
 
-      private:
-        ~Placeholder() {
-            --*mAlive;
-        }
+  private:
+    ~Placeholder() { --*mAlive; }
 
-        int* const mAlive;
-    };
+    int* const mAlive;
+};
 
 }  // namespace
 
@@ -98,17 +94,15 @@
 
 namespace {
 
-    template <size_t alignment>
-    class AlignedData {
-      public:
-        AlignedData() {
-            memset(data_, 0, alignment);
-        }
-        ~AlignedData() = default;
-        AlignedData(const AlignedData&) = default;
-        AlignedData& operator=(const AlignedData&) = default;
-        alignas(alignment) char data_[alignment];
-    };
+template <size_t alignment>
+class AlignedData {
+  public:
+    AlignedData() { memset(data_, 0, alignment); }
+    ~AlignedData() = default;
+    AlignedData(const AlignedData&) = default;
+    AlignedData& operator=(const AlignedData&) = default;
+    alignas(alignment) char data_[alignment];
+};
 
 }  // anonymous namespace
 
diff --git a/src/dawn/tests/unittests/SubresourceStorageTests.cpp b/src/dawn/tests/unittests/SubresourceStorageTests.cpp
index a4e49c0..fb2759a 100644
--- a/src/dawn/tests/unittests/SubresourceStorageTests.cpp
+++ b/src/dawn/tests/unittests/SubresourceStorageTests.cpp
@@ -20,670 +20,659 @@
 
 namespace dawn::native {
 
-    // A fake class that replicates the behavior of SubresourceStorage but without any compression
-    // and is used to compare the results of operations on SubresourceStorage against the "ground
-    // truth" of FakeStorage.
-    template <typename T>
-    struct FakeStorage {
-        FakeStorage(Aspect aspects,
-                    uint32_t arrayLayerCount,
-                    uint32_t mipLevelCount,
-                    T initialValue = {})
-            : mAspects(aspects),
-              mArrayLayerCount(arrayLayerCount),
-              mMipLevelCount(mipLevelCount),
-              mData(GetAspectCount(aspects) * arrayLayerCount * mipLevelCount, initialValue) {
-        }
+// A fake class that replicates the behavior of SubresourceStorage but without any compression
+// and is used to compare the results of operations on SubresourceStorage against the "ground
+// truth" of FakeStorage.
+template <typename T>
+struct FakeStorage {
+    FakeStorage(Aspect aspects,
+                uint32_t arrayLayerCount,
+                uint32_t mipLevelCount,
+                T initialValue = {})
+        : mAspects(aspects),
+          mArrayLayerCount(arrayLayerCount),
+          mMipLevelCount(mipLevelCount),
+          mData(GetAspectCount(aspects) * arrayLayerCount * mipLevelCount, initialValue) {}
 
-        template <typename F>
-        void Update(const SubresourceRange& range, F&& updateFunc) {
-            for (Aspect aspect : IterateEnumMask(range.aspects)) {
-                for (uint32_t layer = range.baseArrayLayer;
-                     layer < range.baseArrayLayer + range.layerCount; layer++) {
-                    for (uint32_t level = range.baseMipLevel;
-                         level < range.baseMipLevel + range.levelCount; level++) {
-                        SubresourceRange range = SubresourceRange::MakeSingle(aspect, layer, level);
-                        updateFunc(range, &mData[GetDataIndex(aspect, layer, level)]);
-                    }
+    template <typename F>
+    void Update(const SubresourceRange& range, F&& updateFunc) {
+        for (Aspect aspect : IterateEnumMask(range.aspects)) {
+            for (uint32_t layer = range.baseArrayLayer;
+                 layer < range.baseArrayLayer + range.layerCount; layer++) {
+                for (uint32_t level = range.baseMipLevel;
+                     level < range.baseMipLevel + range.levelCount; level++) {
+                    SubresourceRange range = SubresourceRange::MakeSingle(aspect, layer, level);
+                    updateFunc(range, &mData[GetDataIndex(aspect, layer, level)]);
+                }
+            }
+        }
+    }
+
+    template <typename U, typename F>
+    void Merge(const SubresourceStorage<U>& other, F&& mergeFunc) {
+        for (Aspect aspect : IterateEnumMask(mAspects)) {
+            for (uint32_t layer = 0; layer < mArrayLayerCount; layer++) {
+                for (uint32_t level = 0; level < mMipLevelCount; level++) {
+                    SubresourceRange range = SubresourceRange::MakeSingle(aspect, layer, level);
+                    mergeFunc(range, &mData[GetDataIndex(aspect, layer, level)],
+                              other.Get(aspect, layer, level));
+                }
+            }
+        }
+    }
+
+    const T& Get(Aspect aspect, uint32_t arrayLayer, uint32_t mipLevel) const {
+        return mData[GetDataIndex(aspect, arrayLayer, mipLevel)];
+    }
+
+    size_t GetDataIndex(Aspect aspect, uint32_t layer, uint32_t level) const {
+        uint32_t aspectIndex = GetAspectIndex(aspect);
+        return level + mMipLevelCount * (layer + mArrayLayerCount * aspectIndex);
+    }
+
+    // Method that checks that this and real have exactly the same content. It does so via
+    // looping on all subresources and calling Get() (hence testing Get()). It also calls
+    // Iterate() checking that every subresource is mentioned exactly once and that its content
+    // is correct (hence testing Iterate()). Its implementation requires the RangeTracker below
+    // that itself needs FakeStorage<int> so it cannot be defined inline with the other methods.
+    void CheckSameAs(const SubresourceStorage<T>& real);
+
+    Aspect mAspects;
+    uint32_t mArrayLayerCount;
+    uint32_t mMipLevelCount;
+
+    std::vector<T> mData;
+};
+
+// Track a set of ranges that have been seen and can assert that in aggregate they make exactly
+// a single range (and that each subresource was seen only once).
+struct RangeTracker {
+    template <typename T>
+    explicit RangeTracker(const SubresourceStorage<T>& s)
+        : mTracked(s.GetAspectsForTesting(),
+                   s.GetArrayLayerCountForTesting(),
+                   s.GetMipLevelCountForTesting(),
+                   0) {}
+
+    void Track(const SubresourceRange& range) {
+        // Add +1 to the subresources tracked.
+        mTracked.Update(range, [](const SubresourceRange&, uint32_t* counter) {
+            ASSERT_EQ(*counter, 0u);
+            *counter += 1;
+        });
+    }
+
+    void CheckTrackedExactly(const SubresourceRange& range) {
+        // Check that all subresources in the range were tracked once and set the counter back
+        // to 0.
+        mTracked.Update(range, [](const SubresourceRange&, uint32_t* counter) {
+            ASSERT_EQ(*counter, 1u);
+            *counter = 0;
+        });
+
+        // Now all subresources should be at 0.
+        for (int counter : mTracked.mData) {
+            ASSERT_EQ(counter, 0);
+        }
+    }
+
+    FakeStorage<uint32_t> mTracked;
+};
+
+template <typename T>
+void FakeStorage<T>::CheckSameAs(const SubresourceStorage<T>& real) {
+    EXPECT_EQ(real.GetAspectsForTesting(), mAspects);
+    EXPECT_EQ(real.GetArrayLayerCountForTesting(), mArrayLayerCount);
+    EXPECT_EQ(real.GetMipLevelCountForTesting(), mMipLevelCount);
+
+    RangeTracker tracker(real);
+    real.Iterate([&](const SubresourceRange& range, const T& data) {
+        // Check that the range is sensical.
+        EXPECT_TRUE(IsSubset(range.aspects, mAspects));
+
+        EXPECT_LT(range.baseArrayLayer, mArrayLayerCount);
+        EXPECT_LE(range.baseArrayLayer + range.layerCount, mArrayLayerCount);
+
+        EXPECT_LT(range.baseMipLevel, mMipLevelCount);
+        EXPECT_LE(range.baseMipLevel + range.levelCount, mMipLevelCount);
+
+        for (Aspect aspect : IterateEnumMask(range.aspects)) {
+            for (uint32_t layer = range.baseArrayLayer;
+                 layer < range.baseArrayLayer + range.layerCount; layer++) {
+                for (uint32_t level = range.baseMipLevel;
+                     level < range.baseMipLevel + range.levelCount; level++) {
+                    EXPECT_EQ(data, Get(aspect, layer, level));
+                    EXPECT_EQ(data, real.Get(aspect, layer, level));
                 }
             }
         }
 
-        template <typename U, typename F>
-        void Merge(const SubresourceStorage<U>& other, F&& mergeFunc) {
-            for (Aspect aspect : IterateEnumMask(mAspects)) {
-                for (uint32_t layer = 0; layer < mArrayLayerCount; layer++) {
-                    for (uint32_t level = 0; level < mMipLevelCount; level++) {
-                        SubresourceRange range = SubresourceRange::MakeSingle(aspect, layer, level);
-                        mergeFunc(range, &mData[GetDataIndex(aspect, layer, level)],
-                                  other.Get(aspect, layer, level));
-                    }
-                }
-            }
+        tracker.Track(range);
+    });
+
+    tracker.CheckTrackedExactly(
+        SubresourceRange::MakeFull(mAspects, mArrayLayerCount, mMipLevelCount));
+}
+
+template <typename T>
+void CheckAspectCompressed(const SubresourceStorage<T>& s, Aspect aspect, bool expected) {
+    ASSERT(HasOneBit(aspect));
+
+    uint32_t levelCount = s.GetMipLevelCountForTesting();
+    uint32_t layerCount = s.GetArrayLayerCountForTesting();
+
+    bool seen = false;
+    s.Iterate([&](const SubresourceRange& range, const T&) {
+        if (range.aspects == aspect && range.layerCount == layerCount &&
+            range.levelCount == levelCount && range.baseArrayLayer == 0 &&
+            range.baseMipLevel == 0) {
+            seen = true;
         }
+    });
 
-        const T& Get(Aspect aspect, uint32_t arrayLayer, uint32_t mipLevel) const {
-            return mData[GetDataIndex(aspect, arrayLayer, mipLevel)];
+    ASSERT_EQ(seen, expected);
+
+    // Check that the internal state of SubresourceStorage matches what we expect.
+    // If an aspect is compressed, all its layers should be internally tagged as compressed.
+    ASSERT_EQ(s.IsAspectCompressedForTesting(aspect), expected);
+    if (expected) {
+        for (uint32_t layer = 0; layer < s.GetArrayLayerCountForTesting(); layer++) {
+            ASSERT_TRUE(s.IsLayerCompressedForTesting(aspect, layer));
         }
+    }
+}
 
-        size_t GetDataIndex(Aspect aspect, uint32_t layer, uint32_t level) const {
-            uint32_t aspectIndex = GetAspectIndex(aspect);
-            return level + mMipLevelCount * (layer + mArrayLayerCount * aspectIndex);
+template <typename T>
+void CheckLayerCompressed(const SubresourceStorage<T>& s,
+                          Aspect aspect,
+                          uint32_t layer,
+                          bool expected) {
+    ASSERT(HasOneBit(aspect));
+
+    uint32_t levelCount = s.GetMipLevelCountForTesting();
+
+    bool seen = false;
+    s.Iterate([&](const SubresourceRange& range, const T&) {
+        if (range.aspects == aspect && range.layerCount == 1 && range.levelCount == levelCount &&
+            range.baseArrayLayer == layer && range.baseMipLevel == 0) {
+            seen = true;
         }
+    });
 
-        // Method that checks that this and real have exactly the same content. It does so via
-        // looping on all subresources and calling Get() (hence testing Get()). It also calls
-        // Iterate() checking that every subresource is mentioned exactly once and that its content
-        // is correct (hence testing Iterate()). Its implementation requires the RangeTracker below
-        // that itself needs FakeStorage<int> so it cannot be define inline with the other methods.
-        void CheckSameAs(const SubresourceStorage<T>& real);
+    ASSERT_EQ(seen, expected);
+    ASSERT_EQ(s.IsLayerCompressedForTesting(aspect, layer), expected);
+}
 
-        Aspect mAspects;
-        uint32_t mArrayLayerCount;
-        uint32_t mMipLevelCount;
+struct SmallData {
+    uint32_t value = 0xF00;
+};
 
-        std::vector<T> mData;
-    };
+bool operator==(const SmallData& a, const SmallData& b) {
+    return a.value == b.value;
+}
 
-    // Track a set of ranges that have been seen and can assert that in aggregate they make exactly
-    // a single range (and that each subresource was seen only once).
-    struct RangeTracker {
-        template <typename T>
-        explicit RangeTracker(const SubresourceStorage<T>& s)
-            : mTracked(s.GetAspectsForTesting(),
-                       s.GetArrayLayerCountForTesting(),
-                       s.GetMipLevelCountForTesting(),
-                       0) {
-        }
+// Test that the default value is correctly set.
+TEST(SubresourceStorageTest, DefaultValue) {
+    // Test setting no default value for a primitive type.
+    {
+        SubresourceStorage<int> s(Aspect::Color, 3, 5);
+        EXPECT_EQ(s.Get(Aspect::Color, 1, 2), 0);
 
-        void Track(const SubresourceRange& range) {
-            // Add +1 to the subresources tracked.
-            mTracked.Update(range, [](const SubresourceRange&, uint32_t* counter) {
-                ASSERT_EQ(*counter, 0u);
-                *counter += 1;
-            });
-        }
-
-        void CheckTrackedExactly(const SubresourceRange& range) {
-            // Check that all subresources in the range were tracked once and set the counter back
-            // to 0.
-            mTracked.Update(range, [](const SubresourceRange&, uint32_t* counter) {
-                ASSERT_EQ(*counter, 1u);
-                *counter = 0;
-            });
-
-            // Now all subresources should be at 0.
-            for (int counter : mTracked.mData) {
-                ASSERT_EQ(counter, 0);
-            }
-        }
-
-        FakeStorage<uint32_t> mTracked;
-    };
-
-    template <typename T>
-    void FakeStorage<T>::CheckSameAs(const SubresourceStorage<T>& real) {
-        EXPECT_EQ(real.GetAspectsForTesting(), mAspects);
-        EXPECT_EQ(real.GetArrayLayerCountForTesting(), mArrayLayerCount);
-        EXPECT_EQ(real.GetMipLevelCountForTesting(), mMipLevelCount);
-
-        RangeTracker tracker(real);
-        real.Iterate([&](const SubresourceRange& range, const T& data) {
-            // Check that the range is sensical.
-            EXPECT_TRUE(IsSubset(range.aspects, mAspects));
-
-            EXPECT_LT(range.baseArrayLayer, mArrayLayerCount);
-            EXPECT_LE(range.baseArrayLayer + range.layerCount, mArrayLayerCount);
-
-            EXPECT_LT(range.baseMipLevel, mMipLevelCount);
-            EXPECT_LE(range.baseMipLevel + range.levelCount, mMipLevelCount);
-
-            for (Aspect aspect : IterateEnumMask(range.aspects)) {
-                for (uint32_t layer = range.baseArrayLayer;
-                     layer < range.baseArrayLayer + range.layerCount; layer++) {
-                    for (uint32_t level = range.baseMipLevel;
-                         level < range.baseMipLevel + range.levelCount; level++) {
-                        EXPECT_EQ(data, Get(aspect, layer, level));
-                        EXPECT_EQ(data, real.Get(aspect, layer, level));
-                    }
-                }
-            }
-
-            tracker.Track(range);
-        });
-
-        tracker.CheckTrackedExactly(
-            SubresourceRange::MakeFull(mAspects, mArrayLayerCount, mMipLevelCount));
+        FakeStorage<int> f(Aspect::Color, 3, 5);
+        f.CheckSameAs(s);
     }
 
-    template <typename T>
-    void CheckAspectCompressed(const SubresourceStorage<T>& s, Aspect aspect, bool expected) {
-        ASSERT(HasOneBit(aspect));
+    // Test setting a default value for a primitive type.
+    {
+        SubresourceStorage<int> s(Aspect::Color, 3, 5, 42);
+        EXPECT_EQ(s.Get(Aspect::Color, 1, 2), 42);
 
-        uint32_t levelCount = s.GetMipLevelCountForTesting();
-        uint32_t layerCount = s.GetArrayLayerCountForTesting();
+        FakeStorage<int> f(Aspect::Color, 3, 5, 42);
+        f.CheckSameAs(s);
+    }
 
-        bool seen = false;
-        s.Iterate([&](const SubresourceRange& range, const T&) {
-            if (range.aspects == aspect && range.layerCount == layerCount &&
-                range.levelCount == levelCount && range.baseArrayLayer == 0 &&
-                range.baseMipLevel == 0) {
-                seen = true;
-            }
-        });
+    // Test setting no default value for a type with a default constructor.
+    {
+        SubresourceStorage<SmallData> s(Aspect::Color, 3, 5);
+        EXPECT_EQ(s.Get(Aspect::Color, 1, 2).value, 0xF00u);
 
-        ASSERT_EQ(seen, expected);
+        FakeStorage<SmallData> f(Aspect::Color, 3, 5);
+        f.CheckSameAs(s);
+    }
+    // Test setting a default value for a type with a default constructor.
+    {
+        SubresourceStorage<SmallData> s(Aspect::Color, 3, 5, {007u});
+        EXPECT_EQ(s.Get(Aspect::Color, 1, 2).value, 007u);
 
-        // Check that the internal state of SubresourceStorage matches what we expect.
-        // If an aspect is compressed, all its layers should be internally tagged as compressed.
-        ASSERT_EQ(s.IsAspectCompressedForTesting(aspect), expected);
-        if (expected) {
-            for (uint32_t layer = 0; layer < s.GetArrayLayerCountForTesting(); layer++) {
-                ASSERT_TRUE(s.IsLayerCompressedForTesting(aspect, layer));
+        FakeStorage<SmallData> f(Aspect::Color, 3, 5, {007u});
+        f.CheckSameAs(s);
+    }
+}
+
+// The tests for Update() all follow the same pattern of setting up a real and a fake storage
+// then performing one or multiple Update()s on them and checking:
+//  - They have the same content.
+//  - The Update() range was correct.
+//  - The aspects and layers have the expected "compressed" status.
+
+// Calls Update both on the read storage and the fake storage but intercepts the call to
+// updateFunc done by the real storage to check their ranges argument aggregate to exactly the
+// update range.
+template <typename T, typename F>
+void CallUpdateOnBoth(SubresourceStorage<T>* s,
+                      FakeStorage<T>* f,
+                      const SubresourceRange& range,
+                      F&& updateFunc) {
+    RangeTracker tracker(*s);
+
+    s->Update(range, [&](const SubresourceRange& range, T* data) {
+        tracker.Track(range);
+        updateFunc(range, data);
+    });
+    f->Update(range, updateFunc);
+
+    tracker.CheckTrackedExactly(range);
+    f->CheckSameAs(*s);
+}
+
+// Test updating a single subresource on a single-aspect storage.
+TEST(SubresourceStorageTest, SingleSubresourceUpdateSingleAspect) {
+    SubresourceStorage<int> s(Aspect::Color, 5, 7);
+    FakeStorage<int> f(Aspect::Color, 5, 7);
+
+    // Update a single subresource.
+    SubresourceRange range = SubresourceRange::MakeSingle(Aspect::Color, 3, 2);
+    CallUpdateOnBoth(&s, &f, range, [](const SubresourceRange&, int* data) { *data += 1; });
+
+    CheckAspectCompressed(s, Aspect::Color, false);
+    CheckLayerCompressed(s, Aspect::Color, 2, true);
+    CheckLayerCompressed(s, Aspect::Color, 3, false);
+    CheckLayerCompressed(s, Aspect::Color, 4, true);
+}
+
+// Test updating a single subresource on a multi-aspect storage.
+TEST(SubresourceStorageTest, SingleSubresourceUpdateMultiAspect) {
+    SubresourceStorage<int> s(Aspect::Depth | Aspect::Stencil, 5, 3);
+    FakeStorage<int> f(Aspect::Depth | Aspect::Stencil, 5, 3);
+
+    SubresourceRange range = SubresourceRange::MakeSingle(Aspect::Stencil, 1, 2);
+    CallUpdateOnBoth(&s, &f, range, [](const SubresourceRange&, int* data) { *data += 1; });
+
+    CheckAspectCompressed(s, Aspect::Depth, true);
+    CheckAspectCompressed(s, Aspect::Stencil, false);
+    CheckLayerCompressed(s, Aspect::Stencil, 0, true);
+    CheckLayerCompressed(s, Aspect::Stencil, 1, false);
+    CheckLayerCompressed(s, Aspect::Stencil, 2, true);
+}
+
+// Test updating as a stipple pattern on one of two aspects then updating it completely.
+TEST(SubresourceStorageTest, UpdateStipple) {
+    const uint32_t kLayers = 10;
+    const uint32_t kLevels = 7;
+    SubresourceStorage<int> s(Aspect::Depth | Aspect::Stencil, kLayers, kLevels);
+    FakeStorage<int> f(Aspect::Depth | Aspect::Stencil, kLayers, kLevels);
+
+    // Update with a stipple.
+    for (uint32_t layer = 0; layer < kLayers; layer++) {
+        for (uint32_t level = 0; level < kLevels; level++) {
+            if ((layer + level) % 2 == 0) {
+                SubresourceRange range = SubresourceRange::MakeSingle(Aspect::Depth, layer, level);
+                CallUpdateOnBoth(&s, &f, range,
+                                 [](const SubresourceRange&, int* data) { *data += 17; });
             }
         }
     }
 
-    template <typename T>
-    void CheckLayerCompressed(const SubresourceStorage<T>& s,
-                              Aspect aspect,
-                              uint32_t layer,
-                              bool expected) {
-        ASSERT(HasOneBit(aspect));
-
-        uint32_t levelCount = s.GetMipLevelCountForTesting();
-
-        bool seen = false;
-        s.Iterate([&](const SubresourceRange& range, const T&) {
-            if (range.aspects == aspect && range.layerCount == 1 &&
-                range.levelCount == levelCount && range.baseArrayLayer == layer &&
-                range.baseMipLevel == 0) {
-                seen = true;
-            }
-        });
-
-        ASSERT_EQ(seen, expected);
-        ASSERT_EQ(s.IsLayerCompressedForTesting(aspect, layer), expected);
+    // The depth should be fully uncompressed while the stencil stayed compressed.
+    CheckAspectCompressed(s, Aspect::Stencil, true);
+    CheckAspectCompressed(s, Aspect::Depth, false);
+    for (uint32_t layer = 0; layer < kLayers; layer++) {
+        CheckLayerCompressed(s, Aspect::Depth, layer, false);
     }
 
-    struct SmallData {
-        uint32_t value = 0xF00;
-    };
-
-    bool operator==(const SmallData& a, const SmallData& b) {
-        return a.value == b.value;
+    // Update completely with a single value. Recompression should happen!
+    {
+        SubresourceRange fullRange =
+            SubresourceRange::MakeFull(Aspect::Depth | Aspect::Stencil, kLayers, kLevels);
+        CallUpdateOnBoth(&s, &f, fullRange, [](const SubresourceRange&, int* data) { *data = 31; });
     }
 
-    // Test that the default value is correctly set.
-    TEST(SubresourceStorageTest, DefaultValue) {
-        // Test setting no default value for a primitive type.
-        {
-            SubresourceStorage<int> s(Aspect::Color, 3, 5);
-            EXPECT_EQ(s.Get(Aspect::Color, 1, 2), 0);
+    CheckAspectCompressed(s, Aspect::Depth, true);
+    CheckAspectCompressed(s, Aspect::Stencil, true);
+}
 
-            FakeStorage<int> f(Aspect::Color, 3, 5);
-            f.CheckSameAs(s);
-        }
+// Test updating as a crossing band pattern:
+//  - The first band is full layers [2, 3] on both aspects
+//  - The second band is full mips [5, 6] on one aspect.
+// Then updating completely.
+TEST(SubresourceStorageTest, UpdateTwoBand) {
+    const uint32_t kLayers = 5;
+    const uint32_t kLevels = 9;
+    SubresourceStorage<int> s(Aspect::Depth | Aspect::Stencil, kLayers, kLevels);
+    FakeStorage<int> f(Aspect::Depth | Aspect::Stencil, kLayers, kLevels);
 
-        // Test setting a default value for a primitive type.
-        {
-            SubresourceStorage<int> s(Aspect::Color, 3, 5, 42);
-            EXPECT_EQ(s.Get(Aspect::Color, 1, 2), 42);
-
-            FakeStorage<int> f(Aspect::Color, 3, 5, 42);
-            f.CheckSameAs(s);
-        }
-
-        // Test setting no default value for a type with a default constructor.
-        {
-            SubresourceStorage<SmallData> s(Aspect::Color, 3, 5);
-            EXPECT_EQ(s.Get(Aspect::Color, 1, 2).value, 0xF00u);
-
-            FakeStorage<SmallData> f(Aspect::Color, 3, 5);
-            f.CheckSameAs(s);
-        }
-        // Test setting a default value for a type with a default constructor.
-        {
-            SubresourceStorage<SmallData> s(Aspect::Color, 3, 5, {007u});
-            EXPECT_EQ(s.Get(Aspect::Color, 1, 2).value, 007u);
-
-            FakeStorage<SmallData> f(Aspect::Color, 3, 5, {007u});
-            f.CheckSameAs(s);
-        }
+    // Update the two bands
+    {
+        SubresourceRange range(Aspect::Depth | Aspect::Stencil, {2, 2}, {0, kLevels});
+        CallUpdateOnBoth(&s, &f, range, [](const SubresourceRange&, int* data) { *data += 3; });
     }
 
-    // The tests for Update() all follow the same pattern of setting up a real and a fake storage
-    // then performing one or multiple Update()s on them and checking:
-    //  - They have the same content.
-    //  - The Update() range was correct.
-    //  - The aspects and layers have the expected "compressed" status.
+    // The layers were fully updated so they should stay compressed.
+    CheckLayerCompressed(s, Aspect::Depth, 2, true);
+    CheckLayerCompressed(s, Aspect::Depth, 3, true);
+    CheckLayerCompressed(s, Aspect::Stencil, 2, true);
+    CheckLayerCompressed(s, Aspect::Stencil, 3, true);
 
-    // Calls Update both on the read storage and the fake storage but intercepts the call to
-    // updateFunc done by the real storage to check their ranges argument aggregate to exactly the
-    // update range.
-    template <typename T, typename F>
-    void CallUpdateOnBoth(SubresourceStorage<T>* s,
-                          FakeStorage<T>* f,
-                          const SubresourceRange& range,
-                          F&& updateFunc) {
-        RangeTracker tracker(*s);
-
-        s->Update(range, [&](const SubresourceRange& range, T* data) {
-            tracker.Track(range);
-            updateFunc(range, data);
-        });
-        f->Update(range, updateFunc);
-
-        tracker.CheckTrackedExactly(range);
-        f->CheckSameAs(*s);
+    {
+        SubresourceRange range(Aspect::Depth, {0, kLayers}, {5, 2});
+        CallUpdateOnBoth(&s, &f, range, [](const SubresourceRange&, int* data) { *data *= 3; });
     }
 
-    // Test updating a single subresource on a single-aspect storage.
-    TEST(SubresourceStorageTest, SingleSubresourceUpdateSingleAspect) {
-        SubresourceStorage<int> s(Aspect::Color, 5, 7);
-        FakeStorage<int> f(Aspect::Color, 5, 7);
+    // The layers had to be decompressed in depth
+    CheckLayerCompressed(s, Aspect::Depth, 2, false);
+    CheckLayerCompressed(s, Aspect::Depth, 3, false);
+    CheckLayerCompressed(s, Aspect::Stencil, 2, true);
+    CheckLayerCompressed(s, Aspect::Stencil, 3, true);
 
-        // Update a single subresource.
-        SubresourceRange range = SubresourceRange::MakeSingle(Aspect::Color, 3, 2);
+    // Update completely. Without a single value recompression shouldn't happen.
+    {
+        SubresourceRange fullRange =
+            SubresourceRange::MakeFull(Aspect::Depth | Aspect::Stencil, kLayers, kLevels);
+        CallUpdateOnBoth(&s, &f, fullRange,
+                         [](const SubresourceRange&, int* data) { *data += 12; });
+    }
+
+    CheckAspectCompressed(s, Aspect::Depth, false);
+    CheckAspectCompressed(s, Aspect::Stencil, false);
+}
+
+// Test updating with extremal subresources
+//    - Then half of the array layers in full.
+//    - Then updating completely.
+TEST(SubresourceStorageTest, UpdateExtremas) {
+    const uint32_t kLayers = 6;
+    const uint32_t kLevels = 4;
+    SubresourceStorage<int> s(Aspect::Color, kLayers, kLevels);
+    FakeStorage<int> f(Aspect::Color, kLayers, kLevels);
+
+    // Update the two extrema
+    {
+        SubresourceRange range = SubresourceRange::MakeSingle(Aspect::Color, 0, kLevels - 1);
+        CallUpdateOnBoth(&s, &f, range, [](const SubresourceRange&, int* data) { *data += 3; });
+    }
+    {
+        SubresourceRange range = SubresourceRange::MakeSingle(Aspect::Color, kLayers - 1, 0);
+        CallUpdateOnBoth(&s, &f, range, [](const SubresourceRange&, int* data) { *data *= 3; });
+    }
+
+    CheckLayerCompressed(s, Aspect::Color, 0, false);
+    CheckLayerCompressed(s, Aspect::Color, 1, true);
+    CheckLayerCompressed(s, Aspect::Color, kLayers - 2, true);
+    CheckLayerCompressed(s, Aspect::Color, kLayers - 1, false);
+
+    // Update half of the layers in full with constant values. Some recompression should happen.
+    {
+        SubresourceRange range(Aspect::Color, {0, kLayers / 2}, {0, kLevels});
+        CallUpdateOnBoth(&s, &f, range, [](const SubresourceRange&, int* data) { *data = 123; });
+    }
+
+    CheckLayerCompressed(s, Aspect::Color, 0, true);
+    CheckLayerCompressed(s, Aspect::Color, 1, true);
+    CheckLayerCompressed(s, Aspect::Color, kLayers - 1, false);
+
+    // Update completely. Recompression should happen!
+    {
+        SubresourceRange fullRange = SubresourceRange::MakeFull(Aspect::Color, kLayers, kLevels);
+        CallUpdateOnBoth(&s, &f, fullRange, [](const SubresourceRange&, int* data) { *data = 35; });
+    }
+
+    CheckAspectCompressed(s, Aspect::Color, true);
+}
+
+// A regression test for an issue found while reworking the implementation where
+// RecompressAspect didn't correctly check that each layer was compressed but only that
+// their 0th value was the same.
+TEST(SubresourceStorageTest, UpdateLevel0sHappenToMatch) {
+    SubresourceStorage<int> s(Aspect::Color, 2, 2);
+    FakeStorage<int> f(Aspect::Color, 2, 2);
+
+    // Update 0th mip levels to some value, it should decompress the aspect and both layers.
+    {
+        SubresourceRange range(Aspect::Color, {0, 2}, {0, 1});
+        CallUpdateOnBoth(&s, &f, range, [](const SubresourceRange&, int* data) { *data = 17; });
+    }
+
+    CheckAspectCompressed(s, Aspect::Color, false);
+    CheckLayerCompressed(s, Aspect::Color, 0, false);
+    CheckLayerCompressed(s, Aspect::Color, 1, false);
+
+    // Update the whole resource by doing +1. The aspects and layers should stay decompressed.
+    {
+        SubresourceRange range = SubresourceRange::MakeFull(Aspect::Color, 2, 2);
         CallUpdateOnBoth(&s, &f, range, [](const SubresourceRange&, int* data) { *data += 1; });
-
-        CheckAspectCompressed(s, Aspect::Color, false);
-        CheckLayerCompressed(s, Aspect::Color, 2, true);
-        CheckLayerCompressed(s, Aspect::Color, 3, false);
-        CheckLayerCompressed(s, Aspect::Color, 4, true);
     }
 
-    // Test updating a single subresource on a multi-aspect storage.
-    TEST(SubresourceStorageTest, SingleSubresourceUpdateMultiAspect) {
-        SubresourceStorage<int> s(Aspect::Depth | Aspect::Stencil, 5, 3);
-        FakeStorage<int> f(Aspect::Depth | Aspect::Stencil, 5, 3);
+    CheckAspectCompressed(s, Aspect::Color, false);
+    CheckLayerCompressed(s, Aspect::Color, 0, false);
+    CheckLayerCompressed(s, Aspect::Color, 1, false);
+}
 
-        SubresourceRange range = SubresourceRange::MakeSingle(Aspect::Stencil, 1, 2);
-        CallUpdateOnBoth(&s, &f, range, [](const SubresourceRange&, int* data) { *data += 1; });
+// The tests for Merge() all follow the same as the Update() tests except that they use Update()
+// to set up the test storages.
 
-        CheckAspectCompressed(s, Aspect::Depth, true);
-        CheckAspectCompressed(s, Aspect::Stencil, false);
-        CheckLayerCompressed(s, Aspect::Stencil, 0, true);
-        CheckLayerCompressed(s, Aspect::Stencil, 1, false);
-        CheckLayerCompressed(s, Aspect::Stencil, 2, true);
+// Similar to CallUpdateOnBoth but for Merge
+template <typename T, typename U, typename F>
+void CallMergeOnBoth(SubresourceStorage<T>* s,
+                     FakeStorage<T>* f,
+                     const SubresourceStorage<U>& other,
+                     F&& mergeFunc) {
+    RangeTracker tracker(*s);
+
+    s->Merge(other, [&](const SubresourceRange& range, T* data, const U& otherData) {
+        tracker.Track(range);
+        mergeFunc(range, data, otherData);
+    });
+    f->Merge(other, mergeFunc);
+
+    tracker.CheckTrackedExactly(
+        SubresourceRange::MakeFull(f->mAspects, f->mArrayLayerCount, f->mMipLevelCount));
+    f->CheckSameAs(*s);
+}
+
+// Test merging two fully compressed single-aspect resources.
+TEST(SubresourceStorageTest, MergeFullWithFullSingleAspect) {
+    SubresourceStorage<int> s(Aspect::Color, 4, 6);
+    FakeStorage<int> f(Aspect::Color, 4, 6);
+
+    // Merge the whole resource in a single call.
+    SubresourceStorage<bool> other(Aspect::Color, 4, 6, true);
+    CallMergeOnBoth(&s, &f, other, [](const SubresourceRange&, int* data, bool other) {
+        if (other) {
+            *data = 13;
+        }
+    });
+
+    CheckAspectCompressed(s, Aspect::Color, true);
+}
+
+// Test merging two fully compressed multi-aspect resources.
+TEST(SubresourceStorageTest, MergeFullWithFullMultiAspect) {
+    SubresourceStorage<int> s(Aspect::Depth | Aspect::Stencil, 6, 7);
+    FakeStorage<int> f(Aspect::Depth | Aspect::Stencil, 6, 7);
+
+    // Merge the whole resource in a single call.
+    SubresourceStorage<bool> other(Aspect::Depth | Aspect::Stencil, 6, 7, true);
+    CallMergeOnBoth(&s, &f, other, [](const SubresourceRange&, int* data, bool other) {
+        if (other) {
+            *data = 13;
+        }
+    });
+
+    CheckAspectCompressed(s, Aspect::Depth, true);
+    CheckAspectCompressed(s, Aspect::Stencil, true);
+}
+
+// Test merging a fully compressed resource in a resource with the "cross band" pattern.
+//  - The first band is full layers [2, 3] on both aspects
+//  - The second band is full mips [5, 6] on one aspect.
+// This provides coverage of using a single piece of data from `other` to update all of `s`
+TEST(SubresourceStorageTest, MergeFullInTwoBand) {
+    const uint32_t kLayers = 5;
+    const uint32_t kLevels = 9;
+    SubresourceStorage<int> s(Aspect::Depth | Aspect::Stencil, kLayers, kLevels);
+    FakeStorage<int> f(Aspect::Depth | Aspect::Stencil, kLayers, kLevels);
+
+    // Update the two bands
+    {
+        SubresourceRange range(Aspect::Depth | Aspect::Stencil, {2, 2}, {0, kLevels});
+        CallUpdateOnBoth(&s, &f, range, [](const SubresourceRange&, int* data) { *data += 3; });
+    }
+    {
+        SubresourceRange range(Aspect::Depth, {0, kLayers}, {5, 2});
+        CallUpdateOnBoth(&s, &f, range, [](const SubresourceRange&, int* data) { *data += 5; });
     }
 
-    // Test updating as a stipple pattern on one of two aspects then updating it completely.
-    TEST(SubresourceStorageTest, UpdateStipple) {
-        const uint32_t kLayers = 10;
-        const uint32_t kLevels = 7;
-        SubresourceStorage<int> s(Aspect::Depth | Aspect::Stencil, kLayers, kLevels);
-        FakeStorage<int> f(Aspect::Depth | Aspect::Stencil, kLayers, kLevels);
+    // Merge the fully compressed resource.
+    SubresourceStorage<int> other(Aspect::Depth | Aspect::Stencil, kLayers, kLevels, 17);
+    CallMergeOnBoth(&s, &f, other,
+                    [](const SubresourceRange&, int* data, int other) { *data += other; });
 
-        // Update with a stipple.
-        for (uint32_t layer = 0; layer < kLayers; layer++) {
-            for (uint32_t level = 0; level < kLevels; level++) {
-                if ((layer + level) % 2 == 0) {
-                    SubresourceRange range =
-                        SubresourceRange::MakeSingle(Aspect::Depth, layer, level);
-                    CallUpdateOnBoth(&s, &f, range,
-                                     [](const SubresourceRange&, int* data) { *data += 17; });
-                }
+    // The layers traversed by the mip band are still uncompressed.
+    CheckLayerCompressed(s, Aspect::Depth, 1, false);
+    CheckLayerCompressed(s, Aspect::Depth, 2, false);
+    CheckLayerCompressed(s, Aspect::Depth, 3, false);
+    CheckLayerCompressed(s, Aspect::Depth, 4, false);
+
+    // Stencil is decompressed but all its layers are still compressed because there wasn't the
+    // mip band.
+    CheckAspectCompressed(s, Aspect::Stencil, false);
+    CheckLayerCompressed(s, Aspect::Stencil, 1, true);
+    CheckLayerCompressed(s, Aspect::Stencil, 2, true);
+    CheckLayerCompressed(s, Aspect::Stencil, 3, true);
+    CheckLayerCompressed(s, Aspect::Stencil, 4, true);
+}
+// Test the reverse, merging two-bands in a full resource. This provides coverage for
+// decompressing aspects and partially decompressing layers to match the compression of `other`
+TEST(SubresourceStorageTest, MergeTwoBandInFull) {
+    const uint32_t kLayers = 5;
+    const uint32_t kLevels = 9;
+    SubresourceStorage<int> s(Aspect::Depth | Aspect::Stencil, kLayers, kLevels, 75);
+    FakeStorage<int> f(Aspect::Depth | Aspect::Stencil, kLayers, kLevels, 75);
+
+    // Update the two bands
+    SubresourceStorage<int> other(Aspect::Depth | Aspect::Stencil, kLayers, kLevels);
+    {
+        SubresourceRange range(Aspect::Depth | Aspect::Stencil, {2, 2}, {0, kLevels});
+        other.Update(range, [](const SubresourceRange&, int* data) { *data += 3; });
+    }
+    {
+        SubresourceRange range(Aspect::Depth, {0, kLayers}, {5, 2});
+        other.Update(range, [](const SubresourceRange&, int* data) { *data += 5; });
+    }
+
+    // Merge the fully compressed resource.
+    CallMergeOnBoth(&s, &f, other,
+                    [](const SubresourceRange&, int* data, int other) { *data += other; });
+
+    // The layers traversed by the mip band are still uncompressed.
+    CheckLayerCompressed(s, Aspect::Depth, 1, false);
+    CheckLayerCompressed(s, Aspect::Depth, 2, false);
+    CheckLayerCompressed(s, Aspect::Depth, 3, false);
+    CheckLayerCompressed(s, Aspect::Depth, 4, false);
+
+    // Stencil is decompressed but all its layers are still compressed because there wasn't the
+    // mip band.
+    CheckAspectCompressed(s, Aspect::Stencil, false);
+    CheckLayerCompressed(s, Aspect::Stencil, 1, true);
+    CheckLayerCompressed(s, Aspect::Stencil, 2, true);
+    CheckLayerCompressed(s, Aspect::Stencil, 3, true);
+    CheckLayerCompressed(s, Aspect::Stencil, 4, true);
+}
+
+// Test merging storage with a layer band in a stipple patterned storage. This provides coverage
+// for the code path that uses the same layer data for other multiple times.
+TEST(SubresourceStorageTest, MergeLayerBandInStipple) {
+    const uint32_t kLayers = 3;
+    const uint32_t kLevels = 5;
+
+    SubresourceStorage<int> s(Aspect::Color, kLayers, kLevels);
+    FakeStorage<int> f(Aspect::Color, kLayers, kLevels);
+    SubresourceStorage<int> other(Aspect::Color, kLayers, kLevels);
+
+    for (uint32_t layer = 0; layer < kLayers; layer++) {
+        for (uint32_t level = 0; level < kLevels; level++) {
+            if ((layer + level) % 2 == 0) {
+                SubresourceRange range = SubresourceRange::MakeSingle(Aspect::Color, layer, level);
+                CallUpdateOnBoth(&s, &f, range,
+                                 [](const SubresourceRange&, int* data) { *data += 17; });
             }
         }
-
-        // The depth should be fully uncompressed while the stencil stayed compressed.
-        CheckAspectCompressed(s, Aspect::Stencil, true);
-        CheckAspectCompressed(s, Aspect::Depth, false);
-        for (uint32_t layer = 0; layer < kLayers; layer++) {
-            CheckLayerCompressed(s, Aspect::Depth, layer, false);
+        if (layer % 2 == 0) {
+            other.Update({Aspect::Color, {layer, 1}, {0, kLevels}},
+                         [](const SubresourceRange&, int* data) { *data += 8; });
         }
-
-        // Update completely with a single value. Recompression should happen!
-        {
-            SubresourceRange fullRange =
-                SubresourceRange::MakeFull(Aspect::Depth | Aspect::Stencil, kLayers, kLevels);
-            CallUpdateOnBoth(&s, &f, fullRange,
-                             [](const SubresourceRange&, int* data) { *data = 31; });
-        }
-
-        CheckAspectCompressed(s, Aspect::Depth, true);
-        CheckAspectCompressed(s, Aspect::Stencil, true);
     }
 
-    // Test updating as a crossing band pattern:
-    //  - The first band is full layers [2, 3] on both aspects
-    //  - The second band is full mips [5, 6] on one aspect.
-    // Then updating completely.
-    TEST(SubresourceStorageTest, UpdateTwoBand) {
-        const uint32_t kLayers = 5;
-        const uint32_t kLevels = 9;
-        SubresourceStorage<int> s(Aspect::Depth | Aspect::Stencil, kLayers, kLevels);
-        FakeStorage<int> f(Aspect::Depth | Aspect::Stencil, kLayers, kLevels);
+    // Merge the band in the stipple.
+    CallMergeOnBoth(&s, &f, other,
+                    [](const SubresourceRange&, int* data, int other) { *data += other; });
 
-        // Update the two bands
-        {
-            SubresourceRange range(Aspect::Depth | Aspect::Stencil, {2, 2}, {0, kLevels});
-            CallUpdateOnBoth(&s, &f, range, [](const SubresourceRange&, int* data) { *data += 3; });
-        }
+    // None of the resulting layers are compressed.
+    CheckLayerCompressed(s, Aspect::Color, 0, false);
+    CheckLayerCompressed(s, Aspect::Color, 1, false);
+    CheckLayerCompressed(s, Aspect::Color, 2, false);
+}
 
-        // The layers were fully updated so they should stay compressed.
-        CheckLayerCompressed(s, Aspect::Depth, 2, true);
-        CheckLayerCompressed(s, Aspect::Depth, 3, true);
-        CheckLayerCompressed(s, Aspect::Stencil, 2, true);
-        CheckLayerCompressed(s, Aspect::Stencil, 3, true);
+// Regression test for a missing check that layer 0 is compressed when recompressing.
+TEST(SubresourceStorageTest, Layer0NotCompressedBlocksAspectRecompression) {
+    const uint32_t kLayers = 2;
+    const uint32_t kLevels = 2;
+    SubresourceStorage<int> s(Aspect::Color, kLayers, kLevels);
+    FakeStorage<int> f(Aspect::Color, kLayers, kLevels);
 
-        {
-            SubresourceRange range(Aspect::Depth, {0, kLayers}, {5, 2});
-            CallUpdateOnBoth(&s, &f, range, [](const SubresourceRange&, int* data) { *data *= 3; });
-        }
-
-        // The layers had to be decompressed in depth
-        CheckLayerCompressed(s, Aspect::Depth, 2, false);
-        CheckLayerCompressed(s, Aspect::Depth, 3, false);
-        CheckLayerCompressed(s, Aspect::Stencil, 2, true);
-        CheckLayerCompressed(s, Aspect::Stencil, 3, true);
-
-        // Update completely. Without a single value recompression shouldn't happen.
-        {
-            SubresourceRange fullRange =
-                SubresourceRange::MakeFull(Aspect::Depth | Aspect::Stencil, kLayers, kLevels);
-            CallUpdateOnBoth(&s, &f, fullRange,
-                             [](const SubresourceRange&, int* data) { *data += 12; });
-        }
-
-        CheckAspectCompressed(s, Aspect::Depth, false);
-        CheckAspectCompressed(s, Aspect::Stencil, false);
+    // Set up s with zeros except (0, 1) which is garbage.
+    {
+        SubresourceRange range = SubresourceRange::MakeSingle(Aspect::Color, 0, 1);
+        CallUpdateOnBoth(&s, &f, range, [](const SubresourceRange&, int* data) { *data += 0xABC; });
     }
 
-    // Test updating with extremal subresources
-    //    - Then half of the array layers in full.
-    //    - Then updating completely.
-    TEST(SubresourceStorageTest, UpdateExtremas) {
-        const uint32_t kLayers = 6;
-        const uint32_t kLevels = 4;
-        SubresourceStorage<int> s(Aspect::Color, kLayers, kLevels);
-        FakeStorage<int> f(Aspect::Color, kLayers, kLevels);
+    // Other is 2x2 of zeroes
+    SubresourceStorage<int> other(Aspect::Color, kLayers, kLevels);
 
-        // Update the two extrema
-        {
-            SubresourceRange range = SubresourceRange::MakeSingle(Aspect::Color, 0, kLevels - 1);
-            CallUpdateOnBoth(&s, &f, range, [](const SubresourceRange&, int* data) { *data += 3; });
-        }
-        {
-            SubresourceRange range = SubresourceRange::MakeSingle(Aspect::Color, kLayers - 1, 0);
-            CallUpdateOnBoth(&s, &f, range, [](const SubresourceRange&, int* data) { *data *= 3; });
-        }
+    // Fake updating F with other which is fully compressed and will trigger recompression.
+    CallMergeOnBoth(&s, &f, other, [](const SubresourceRange&, int*, int) {});
 
-        CheckLayerCompressed(s, Aspect::Color, 0, false);
-        CheckLayerCompressed(s, Aspect::Color, 1, true);
-        CheckLayerCompressed(s, Aspect::Color, kLayers - 2, true);
-        CheckLayerCompressed(s, Aspect::Color, kLayers - 1, false);
+    // The Color aspect should not have been recompressed.
+    CheckAspectCompressed(s, Aspect::Color, false);
+    CheckLayerCompressed(s, Aspect::Color, 0, false);
+}
 
-        // Update half of the layers in full with constant values. Some recompression should happen.
-        {
-            SubresourceRange range(Aspect::Color, {0, kLayers / 2}, {0, kLevels});
-            CallUpdateOnBoth(&s, &f, range,
-                             [](const SubresourceRange&, int* data) { *data = 123; });
-        }
+// Regression test for aspect decompression not copying to layer 0
+TEST(SubresourceStorageTest, AspectDecompressionUpdatesLayer0) {
+    const uint32_t kLayers = 2;
+    const uint32_t kLevels = 2;
+    SubresourceStorage<int> s(Aspect::Color, kLayers, kLevels, 3);
+    FakeStorage<int> f(Aspect::Color, kLayers, kLevels, 3);
 
-        CheckLayerCompressed(s, Aspect::Color, 0, true);
-        CheckLayerCompressed(s, Aspect::Color, 1, true);
-        CheckLayerCompressed(s, Aspect::Color, kLayers - 1, false);
-
-        // Update completely. Recompression should happen!
-        {
-            SubresourceRange fullRange =
-                SubresourceRange::MakeFull(Aspect::Color, kLayers, kLevels);
-            CallUpdateOnBoth(&s, &f, fullRange,
-                             [](const SubresourceRange&, int* data) { *data = 35; });
-        }
-
-        CheckAspectCompressed(s, Aspect::Color, true);
+    // Cause decompression by writing to a single subresource.
+    {
+        SubresourceRange range = SubresourceRange::MakeSingle(Aspect::Color, 1, 1);
+        CallUpdateOnBoth(&s, &f, range, [](const SubresourceRange&, int* data) { *data += 0xABC; });
     }
 
-    // A regression test for an issue found while reworking the implementation where
-    // RecompressAspect didn't correctly check that each each layer was compressed but only that
-    // their 0th value was the same.
-    TEST(SubresourceStorageTest, UpdateLevel0sHappenToMatch) {
-        SubresourceStorage<int> s(Aspect::Color, 2, 2);
-        FakeStorage<int> f(Aspect::Color, 2, 2);
+    // Check that the aspect's value of 3 was correctly decompressed in layer 0.
+    CheckLayerCompressed(s, Aspect::Color, 0, true);
+    EXPECT_EQ(3, s.Get(Aspect::Color, 0, 0));
+    EXPECT_EQ(3, s.Get(Aspect::Color, 0, 1));
+}
 
-        // Update 0th mip levels to some value, it should decompress the aspect and both layers.
-        {
-            SubresourceRange range(Aspect::Color, {0, 2}, {0, 1});
-            CallUpdateOnBoth(&s, &f, range, [](const SubresourceRange&, int* data) { *data = 17; });
-        }
-
-        CheckAspectCompressed(s, Aspect::Color, false);
-        CheckLayerCompressed(s, Aspect::Color, 0, false);
-        CheckLayerCompressed(s, Aspect::Color, 1, false);
-
-        // Update the whole resource by doing +1. The aspects and layers should stay decompressed.
-        {
-            SubresourceRange range = SubresourceRange::MakeFull(Aspect::Color, 2, 2);
-            CallUpdateOnBoth(&s, &f, range, [](const SubresourceRange&, int* data) { *data += 1; });
-        }
-
-        CheckAspectCompressed(s, Aspect::Color, false);
-        CheckLayerCompressed(s, Aspect::Color, 0, false);
-        CheckLayerCompressed(s, Aspect::Color, 1, false);
-    }
-
-    // The tests for Merge() all follow the same as the Update() tests except that they use Update()
-    // to set up the test storages.
-
-    // Similar to CallUpdateOnBoth but for Merge
-    template <typename T, typename U, typename F>
-    void CallMergeOnBoth(SubresourceStorage<T>* s,
-                         FakeStorage<T>* f,
-                         const SubresourceStorage<U>& other,
-                         F&& mergeFunc) {
-        RangeTracker tracker(*s);
-
-        s->Merge(other, [&](const SubresourceRange& range, T* data, const U& otherData) {
-            tracker.Track(range);
-            mergeFunc(range, data, otherData);
-        });
-        f->Merge(other, mergeFunc);
-
-        tracker.CheckTrackedExactly(
-            SubresourceRange::MakeFull(f->mAspects, f->mArrayLayerCount, f->mMipLevelCount));
-        f->CheckSameAs(*s);
-    }
-
-    // Test merging two fully compressed single-aspect resources.
-    TEST(SubresourceStorageTest, MergeFullWithFullSingleAspect) {
-        SubresourceStorage<int> s(Aspect::Color, 4, 6);
-        FakeStorage<int> f(Aspect::Color, 4, 6);
-
-        // Merge the whole resource in a single call.
-        SubresourceStorage<bool> other(Aspect::Color, 4, 6, true);
-        CallMergeOnBoth(&s, &f, other, [](const SubresourceRange&, int* data, bool other) {
-            if (other) {
-                *data = 13;
-            }
-        });
-
-        CheckAspectCompressed(s, Aspect::Color, true);
-    }
-
-    // Test merging two fully compressed multi-aspect resources.
-    TEST(SubresourceStorageTest, MergeFullWithFullMultiAspect) {
-        SubresourceStorage<int> s(Aspect::Depth | Aspect::Stencil, 6, 7);
-        FakeStorage<int> f(Aspect::Depth | Aspect::Stencil, 6, 7);
-
-        // Merge the whole resource in a single call.
-        SubresourceStorage<bool> other(Aspect::Depth | Aspect::Stencil, 6, 7, true);
-        CallMergeOnBoth(&s, &f, other, [](const SubresourceRange&, int* data, bool other) {
-            if (other) {
-                *data = 13;
-            }
-        });
-
-        CheckAspectCompressed(s, Aspect::Depth, true);
-        CheckAspectCompressed(s, Aspect::Stencil, true);
-    }
-
-    // Test merging a fully compressed resource in a resource with the "cross band" pattern.
-    //  - The first band is full layers [2, 3] on both aspects
-    //  - The second band is full mips [5, 6] on one aspect.
-    // This provides coverage of using a single piece of data from `other` to update all of `s`
-    TEST(SubresourceStorageTest, MergeFullInTwoBand) {
-        const uint32_t kLayers = 5;
-        const uint32_t kLevels = 9;
-        SubresourceStorage<int> s(Aspect::Depth | Aspect::Stencil, kLayers, kLevels);
-        FakeStorage<int> f(Aspect::Depth | Aspect::Stencil, kLayers, kLevels);
-
-        // Update the two bands
-        {
-            SubresourceRange range(Aspect::Depth | Aspect::Stencil, {2, 2}, {0, kLevels});
-            CallUpdateOnBoth(&s, &f, range, [](const SubresourceRange&, int* data) { *data += 3; });
-        }
-        {
-            SubresourceRange range(Aspect::Depth, {0, kLayers}, {5, 2});
-            CallUpdateOnBoth(&s, &f, range, [](const SubresourceRange&, int* data) { *data += 5; });
-        }
-
-        // Merge the fully compressed resource.
-        SubresourceStorage<int> other(Aspect::Depth | Aspect::Stencil, kLayers, kLevels, 17);
-        CallMergeOnBoth(&s, &f, other,
-                        [](const SubresourceRange&, int* data, int other) { *data += other; });
-
-        // The layers traversed by the mip band are still uncompressed.
-        CheckLayerCompressed(s, Aspect::Depth, 1, false);
-        CheckLayerCompressed(s, Aspect::Depth, 2, false);
-        CheckLayerCompressed(s, Aspect::Depth, 3, false);
-        CheckLayerCompressed(s, Aspect::Depth, 4, false);
-
-        // Stencil is decompressed but all its layers are still compressed because there wasn't the
-        // mip band.
-        CheckAspectCompressed(s, Aspect::Stencil, false);
-        CheckLayerCompressed(s, Aspect::Stencil, 1, true);
-        CheckLayerCompressed(s, Aspect::Stencil, 2, true);
-        CheckLayerCompressed(s, Aspect::Stencil, 3, true);
-        CheckLayerCompressed(s, Aspect::Stencil, 4, true);
-    }
-    // Test the reverse, mergign two-bands in a full resource. This provides coverage for
-    // decompressing aspects / and partilly layers to match the compression of `other`
-    TEST(SubresourceStorageTest, MergeTwoBandInFull) {
-        const uint32_t kLayers = 5;
-        const uint32_t kLevels = 9;
-        SubresourceStorage<int> s(Aspect::Depth | Aspect::Stencil, kLayers, kLevels, 75);
-        FakeStorage<int> f(Aspect::Depth | Aspect::Stencil, kLayers, kLevels, 75);
-
-        // Update the two bands
-        SubresourceStorage<int> other(Aspect::Depth | Aspect::Stencil, kLayers, kLevels);
-        {
-            SubresourceRange range(Aspect::Depth | Aspect::Stencil, {2, 2}, {0, kLevels});
-            other.Update(range, [](const SubresourceRange&, int* data) { *data += 3; });
-        }
-        {
-            SubresourceRange range(Aspect::Depth, {0, kLayers}, {5, 2});
-            other.Update(range, [](const SubresourceRange&, int* data) { *data += 5; });
-        }
-
-        // Merge the fully compressed resource.
-        CallMergeOnBoth(&s, &f, other,
-                        [](const SubresourceRange&, int* data, int other) { *data += other; });
-
-        // The layers traversed by the mip band are still uncompressed.
-        CheckLayerCompressed(s, Aspect::Depth, 1, false);
-        CheckLayerCompressed(s, Aspect::Depth, 2, false);
-        CheckLayerCompressed(s, Aspect::Depth, 3, false);
-        CheckLayerCompressed(s, Aspect::Depth, 4, false);
-
-        // Stencil is decompressed but all its layers are still compressed because there wasn't the
-        // mip band.
-        CheckAspectCompressed(s, Aspect::Stencil, false);
-        CheckLayerCompressed(s, Aspect::Stencil, 1, true);
-        CheckLayerCompressed(s, Aspect::Stencil, 2, true);
-        CheckLayerCompressed(s, Aspect::Stencil, 3, true);
-        CheckLayerCompressed(s, Aspect::Stencil, 4, true);
-    }
-
-    // Test merging storage with a layer band in a stipple patterned storage. This provide coverage
-    // for the code path that uses the same layer data for other multiple times.
-    TEST(SubresourceStorageTest, MergeLayerBandInStipple) {
-        const uint32_t kLayers = 3;
-        const uint32_t kLevels = 5;
-
-        SubresourceStorage<int> s(Aspect::Color, kLayers, kLevels);
-        FakeStorage<int> f(Aspect::Color, kLayers, kLevels);
-        SubresourceStorage<int> other(Aspect::Color, kLayers, kLevels);
-
-        for (uint32_t layer = 0; layer < kLayers; layer++) {
-            for (uint32_t level = 0; level < kLevels; level++) {
-                if ((layer + level) % 2 == 0) {
-                    SubresourceRange range =
-                        SubresourceRange::MakeSingle(Aspect::Color, layer, level);
-                    CallUpdateOnBoth(&s, &f, range,
-                                     [](const SubresourceRange&, int* data) { *data += 17; });
-                }
-            }
-            if (layer % 2 == 0) {
-                other.Update({Aspect::Color, {layer, 1}, {0, kLevels}},
-                             [](const SubresourceRange&, int* data) { *data += 8; });
-            }
-        }
-
-        // Merge the band in the stipple.
-        CallMergeOnBoth(&s, &f, other,
-                        [](const SubresourceRange&, int* data, int other) { *data += other; });
-
-        // None of the resulting layers are compressed.
-        CheckLayerCompressed(s, Aspect::Color, 0, false);
-        CheckLayerCompressed(s, Aspect::Color, 1, false);
-        CheckLayerCompressed(s, Aspect::Color, 2, false);
-    }
-
-    // Regression test for a missing check that layer 0 is compressed when recompressing.
-    TEST(SubresourceStorageTest, Layer0NotCompressedBlocksAspectRecompression) {
-        const uint32_t kLayers = 2;
-        const uint32_t kLevels = 2;
-        SubresourceStorage<int> s(Aspect::Color, kLayers, kLevels);
-        FakeStorage<int> f(Aspect::Color, kLayers, kLevels);
-
-        // Set up s with zeros except (0, 1) which is garbage.
-        {
-            SubresourceRange range = SubresourceRange::MakeSingle(Aspect::Color, 0, 1);
-            CallUpdateOnBoth(&s, &f, range,
-                             [](const SubresourceRange&, int* data) { *data += 0xABC; });
-        }
-
-        // Other is 2x2 of zeroes
-        SubresourceStorage<int> other(Aspect::Color, kLayers, kLevels);
-
-        // Fake updating F with other which is fully compressed and will trigger recompression.
-        CallMergeOnBoth(&s, &f, other, [](const SubresourceRange&, int*, int) {});
-
-        // The Color aspect should not have been recompressed.
-        CheckAspectCompressed(s, Aspect::Color, false);
-        CheckLayerCompressed(s, Aspect::Color, 0, false);
-    }
-
-    // Regression test for aspect decompression not copying to layer 0
-    TEST(SubresourceStorageTest, AspectDecompressionUpdatesLayer0) {
-        const uint32_t kLayers = 2;
-        const uint32_t kLevels = 2;
-        SubresourceStorage<int> s(Aspect::Color, kLayers, kLevels, 3);
-        FakeStorage<int> f(Aspect::Color, kLayers, kLevels, 3);
-
-        // Cause decompression by writing to a single subresource.
-        {
-            SubresourceRange range = SubresourceRange::MakeSingle(Aspect::Color, 1, 1);
-            CallUpdateOnBoth(&s, &f, range,
-                             [](const SubresourceRange&, int* data) { *data += 0xABC; });
-        }
-
-        // Check that the aspect's value of 3 was correctly decompressed in layer 0.
-        CheckLayerCompressed(s, Aspect::Color, 0, true);
-        EXPECT_EQ(3, s.Get(Aspect::Color, 0, 0));
-        EXPECT_EQ(3, s.Get(Aspect::Color, 0, 1));
-    }
-
-    // Bugs found while testing:
-    //  - mLayersCompressed not initialized to true.
-    //  - DecompressLayer setting Compressed to true instead of false.
-    //  - Get() checking for !compressed instead of compressed for the early exit.
-    //  - ASSERT in RecompressLayers was inverted.
-    //  - Two != being converted to == during a rework.
-    //  - (with ASSERT) that RecompressAspect didn't check that aspect 0 was compressed.
-    //  - Missing decompression of layer 0 after introducing mInlineAspectData.
+// Bugs found while testing:
+//  - mLayersCompressed not initialized to true.
+//  - DecompressLayer setting Compressed to true instead of false.
+//  - Get() checking for !compressed instead of compressed for the early exit.
+//  - ASSERT in RecompressLayers was inverted.
+//  - Two != being converted to == during a rework.
+//  - (with ASSERT) that RecompressAspect didn't check that aspect 0 was compressed.
+//  - Missing decompression of layer 0 after introducing mInlineAspectData.
 
 }  // namespace dawn::native
diff --git a/src/dawn/tests/unittests/ToBackendTests.cpp b/src/dawn/tests/unittests/ToBackendTests.cpp
index 2ee3a75..8d64fce 100644
--- a/src/dawn/tests/unittests/ToBackendTests.cpp
+++ b/src/dawn/tests/unittests/ToBackendTests.cpp
@@ -21,7 +21,7 @@
 
 // Make our own Base - Backend object pair, reusing the AdapterBase name
 namespace dawn::native {
-    class AdapterBase : public RefCounted {};
+class AdapterBase : public RefCounted {};
 
 class MyAdapter : public AdapterBase {};
 
diff --git a/src/dawn/tests/unittests/VersionTests.cpp b/src/dawn/tests/unittests/VersionTests.cpp
index 173456c..ae7ea8b 100644
--- a/src/dawn/tests/unittests/VersionTests.cpp
+++ b/src/dawn/tests/unittests/VersionTests.cpp
@@ -18,13 +18,15 @@
 #include "gmock/gmock.h"
 #include "gtest/gtest.h"
 
-namespace dawn { namespace {
+namespace dawn {
+namespace {
 
-    using ::testing::SizeIs;
+using ::testing::SizeIs;
 
-    TEST(VersionTests, GitCommitHashLength) {
-        // Git hashes should be 40 characters long.
-        EXPECT_THAT(std::string(kGitHash), SizeIs(40));
-    }
+TEST(VersionTests, GitCommitHashLength) {
+    // Git hashes should be 40 characters long.
+    EXPECT_THAT(std::string(kGitHash), SizeIs(40));
+}
 
-}}  // namespace dawn::
+}  // namespace
+}  // namespace dawn
diff --git a/src/dawn/tests/unittests/d3d12/CopySplitTests.cpp b/src/dawn/tests/unittests/d3d12/CopySplitTests.cpp
index 1f94b98..1bb99398 100644
--- a/src/dawn/tests/unittests/d3d12/CopySplitTests.cpp
+++ b/src/dawn/tests/unittests/d3d12/CopySplitTests.cpp
@@ -25,515 +25,505 @@
 #include "gtest/gtest.h"
 
 namespace dawn::native::d3d12 {
-    namespace {
+namespace {
 
-        struct TextureSpec {
-            uint32_t x;
-            uint32_t y;
-            uint32_t z;
-            uint32_t width;
-            uint32_t height;
-            uint32_t depthOrArrayLayers;
-            uint32_t texelBlockSizeInBytes;
-            uint32_t blockWidth = 1;
-            uint32_t blockHeight = 1;
-        };
+struct TextureSpec {
+    uint32_t x;
+    uint32_t y;
+    uint32_t z;
+    uint32_t width;
+    uint32_t height;
+    uint32_t depthOrArrayLayers;
+    uint32_t texelBlockSizeInBytes;
+    uint32_t blockWidth = 1;
+    uint32_t blockHeight = 1;
+};
 
-        struct BufferSpec {
-            uint64_t offset;
-            uint32_t bytesPerRow;
-            uint32_t rowsPerImage;
-        };
+struct BufferSpec {
+    uint64_t offset;
+    uint32_t bytesPerRow;
+    uint32_t rowsPerImage;
+};
 
-        // Check that each copy region fits inside the buffer footprint
-        void ValidateFootprints(const TextureSpec& textureSpec,
-                                const BufferSpec& bufferSpec,
-                                const TextureCopySubresource& copySplit,
-                                wgpu::TextureDimension dimension) {
-            for (uint32_t i = 0; i < copySplit.count; ++i) {
-                const auto& copy = copySplit.copies[i];
-                ASSERT_LE(copy.bufferOffset.x + copy.copySize.width, copy.bufferSize.width);
-                ASSERT_LE(copy.bufferOffset.y + copy.copySize.height, copy.bufferSize.height);
-                ASSERT_LE(copy.bufferOffset.z + copy.copySize.depthOrArrayLayers,
-                          copy.bufferSize.depthOrArrayLayers);
+// Check that each copy region fits inside the buffer footprint
+void ValidateFootprints(const TextureSpec& textureSpec,
+                        const BufferSpec& bufferSpec,
+                        const TextureCopySubresource& copySplit,
+                        wgpu::TextureDimension dimension) {
+    for (uint32_t i = 0; i < copySplit.count; ++i) {
+        const auto& copy = copySplit.copies[i];
+        ASSERT_LE(copy.bufferOffset.x + copy.copySize.width, copy.bufferSize.width);
+        ASSERT_LE(copy.bufferOffset.y + copy.copySize.height, copy.bufferSize.height);
+        ASSERT_LE(copy.bufferOffset.z + copy.copySize.depthOrArrayLayers,
+                  copy.bufferSize.depthOrArrayLayers);
 
-                // If there are multiple layers, 2D texture splitter actually splits each layer
-                // independently. See the details in Compute2DTextureCopySplits(). As a result,
-                // if we simply expand a copy region generated by 2D texture splitter to all
-                // layers, the copy region might be OOB. But that is not the approach that the
-                // current 2D texture splitter is doing, although Compute2DTextureCopySubresource
-                // forwards "copySize.depthOrArrayLayers" to the copy region it generated. So skip
-                // the test below for 2D textures with multiple layers.
-                if (textureSpec.depthOrArrayLayers <= 1 ||
-                    dimension == wgpu::TextureDimension::e3D) {
-                    uint32_t widthInBlocks = textureSpec.width / textureSpec.blockWidth;
-                    uint32_t heightInBlocks = textureSpec.height / textureSpec.blockHeight;
-                    uint64_t minimumRequiredBufferSize =
-                        bufferSpec.offset +
-                        utils::RequiredBytesInCopy(bufferSpec.bytesPerRow, bufferSpec.rowsPerImage,
-                                                   widthInBlocks, heightInBlocks,
-                                                   textureSpec.depthOrArrayLayers,
-                                                   textureSpec.texelBlockSizeInBytes);
+        // If there are multiple layers, 2D texture splitter actually splits each layer
+        // independently. See the details in Compute2DTextureCopySplits(). As a result,
+        // if we simply expand a copy region generated by 2D texture splitter to all
+        // layers, the copy region might be OOB. But that is not the approach that the
+        // current 2D texture splitter is doing, although Compute2DTextureCopySubresource
+        // forwards "copySize.depthOrArrayLayers" to the copy region it generated. So skip
+        // the test below for 2D textures with multiple layers.
+        if (textureSpec.depthOrArrayLayers <= 1 || dimension == wgpu::TextureDimension::e3D) {
+            uint32_t widthInBlocks = textureSpec.width / textureSpec.blockWidth;
+            uint32_t heightInBlocks = textureSpec.height / textureSpec.blockHeight;
+            uint64_t minimumRequiredBufferSize =
+                bufferSpec.offset +
+                utils::RequiredBytesInCopy(
+                    bufferSpec.bytesPerRow, bufferSpec.rowsPerImage, widthInBlocks, heightInBlocks,
+                    textureSpec.depthOrArrayLayers, textureSpec.texelBlockSizeInBytes);
 
-                    // The last pixel (buffer footprint) of each copy region depends on its
-                    // bufferOffset and copySize. It is not the last pixel where the bufferSize
-                    // ends.
-                    ASSERT_EQ(copy.bufferOffset.x % textureSpec.blockWidth, 0u);
-                    ASSERT_EQ(copy.copySize.width % textureSpec.blockWidth, 0u);
-                    uint32_t footprintWidth = copy.bufferOffset.x + copy.copySize.width;
-                    ASSERT_EQ(footprintWidth % textureSpec.blockWidth, 0u);
-                    uint32_t footprintWidthInBlocks = footprintWidth / textureSpec.blockWidth;
+            // The last pixel (buffer footprint) of each copy region depends on its
+            // bufferOffset and copySize. It is not the last pixel where the bufferSize
+            // ends.
+            ASSERT_EQ(copy.bufferOffset.x % textureSpec.blockWidth, 0u);
+            ASSERT_EQ(copy.copySize.width % textureSpec.blockWidth, 0u);
+            uint32_t footprintWidth = copy.bufferOffset.x + copy.copySize.width;
+            ASSERT_EQ(footprintWidth % textureSpec.blockWidth, 0u);
+            uint32_t footprintWidthInBlocks = footprintWidth / textureSpec.blockWidth;
 
-                    ASSERT_EQ(copy.bufferOffset.y % textureSpec.blockHeight, 0u);
-                    ASSERT_EQ(copy.copySize.height % textureSpec.blockHeight, 0u);
-                    uint32_t footprintHeight = copy.bufferOffset.y + copy.copySize.height;
-                    ASSERT_EQ(footprintHeight % textureSpec.blockHeight, 0u);
-                    uint32_t footprintHeightInBlocks = footprintHeight / textureSpec.blockHeight;
+            ASSERT_EQ(copy.bufferOffset.y % textureSpec.blockHeight, 0u);
+            ASSERT_EQ(copy.copySize.height % textureSpec.blockHeight, 0u);
+            uint32_t footprintHeight = copy.bufferOffset.y + copy.copySize.height;
+            ASSERT_EQ(footprintHeight % textureSpec.blockHeight, 0u);
+            uint32_t footprintHeightInBlocks = footprintHeight / textureSpec.blockHeight;
 
-                    uint64_t bufferSizeForFootprint =
-                        copy.alignedOffset +
-                        utils::RequiredBytesInCopy(bufferSpec.bytesPerRow, copy.bufferSize.height,
-                                                   footprintWidthInBlocks, footprintHeightInBlocks,
-                                                   copy.bufferSize.depthOrArrayLayers,
-                                                   textureSpec.texelBlockSizeInBytes);
+            uint64_t bufferSizeForFootprint =
+                copy.alignedOffset +
+                utils::RequiredBytesInCopy(bufferSpec.bytesPerRow, copy.bufferSize.height,
+                                           footprintWidthInBlocks, footprintHeightInBlocks,
+                                           copy.bufferSize.depthOrArrayLayers,
+                                           textureSpec.texelBlockSizeInBytes);
 
-                    // The buffer footprint of each copy region should not exceed the minimum
-                    // required buffer size. Otherwise, pixels accessed by copy may be OOB.
-                    ASSERT_LE(bufferSizeForFootprint, minimumRequiredBufferSize);
-                }
-            }
+            // The buffer footprint of each copy region should not exceed the minimum
+            // required buffer size. Otherwise, pixels accessed by copy may be OOB.
+            ASSERT_LE(bufferSizeForFootprint, minimumRequiredBufferSize);
         }
+    }
+}
 
-        // Check that the offset is aligned
-        void ValidateOffset(const TextureCopySubresource& copySplit) {
-            for (uint32_t i = 0; i < copySplit.count; ++i) {
-                ASSERT_TRUE(Align(copySplit.copies[i].alignedOffset,
-                                  D3D12_TEXTURE_DATA_PLACEMENT_ALIGNMENT) ==
-                            copySplit.copies[i].alignedOffset);
-            }
+// Check that the offset is aligned
+void ValidateOffset(const TextureCopySubresource& copySplit) {
+    for (uint32_t i = 0; i < copySplit.count; ++i) {
+        ASSERT_TRUE(
+            Align(copySplit.copies[i].alignedOffset, D3D12_TEXTURE_DATA_PLACEMENT_ALIGNMENT) ==
+            copySplit.copies[i].alignedOffset);
+    }
+}
+
+bool InclusiveRangesOverlap(uint32_t minA, uint32_t maxA, uint32_t minB, uint32_t maxB) {
+    return (minA <= minB && minB <= maxA) || (minB <= minA && minA <= maxB);
+}
+
+// Check that no pair of copy regions intersect each other
+void ValidateDisjoint(const TextureCopySubresource& copySplit) {
+    for (uint32_t i = 0; i < copySplit.count; ++i) {
+        const auto& a = copySplit.copies[i];
+        for (uint32_t j = i + 1; j < copySplit.count; ++j) {
+            const auto& b = copySplit.copies[j];
+            // If textureOffset.x is 0, and copySize.width is 2, we are copying pixel 0 and
+            // 1. We never touch pixel 2 on x-axis. So the copied range on x-axis should be
+            // [textureOffset.x, textureOffset.x + copySize.width - 1] and both ends are
+            // included.
+            bool overlapX =
+                InclusiveRangesOverlap(a.textureOffset.x, a.textureOffset.x + a.copySize.width - 1,
+                                       b.textureOffset.x, b.textureOffset.x + b.copySize.width - 1);
+            bool overlapY = InclusiveRangesOverlap(
+                a.textureOffset.y, a.textureOffset.y + a.copySize.height - 1, b.textureOffset.y,
+                b.textureOffset.y + b.copySize.height - 1);
+            bool overlapZ = InclusiveRangesOverlap(
+                a.textureOffset.z, a.textureOffset.z + a.copySize.depthOrArrayLayers - 1,
+                b.textureOffset.z, b.textureOffset.z + b.copySize.depthOrArrayLayers - 1);
+            ASSERT_TRUE(!overlapX || !overlapY || !overlapZ);
         }
+    }
+}
 
-        bool InclusiveRangesOverlap(uint32_t minA, uint32_t maxA, uint32_t minB, uint32_t maxB) {
-            return (minA <= minB && minB <= maxA) || (minB <= minA && minA <= maxB);
+// Check that the union of the copy regions exactly covers the texture region
+void ValidateTextureBounds(const TextureSpec& textureSpec,
+                           const TextureCopySubresource& copySplit) {
+    ASSERT_GT(copySplit.count, 0u);
+
+    uint32_t minX = copySplit.copies[0].textureOffset.x;
+    uint32_t minY = copySplit.copies[0].textureOffset.y;
+    uint32_t minZ = copySplit.copies[0].textureOffset.z;
+    uint32_t maxX = copySplit.copies[0].textureOffset.x + copySplit.copies[0].copySize.width;
+    uint32_t maxY = copySplit.copies[0].textureOffset.y + copySplit.copies[0].copySize.height;
+    uint32_t maxZ =
+        copySplit.copies[0].textureOffset.z + copySplit.copies[0].copySize.depthOrArrayLayers;
+
+    for (uint32_t i = 1; i < copySplit.count; ++i) {
+        const auto& copy = copySplit.copies[i];
+        minX = std::min(minX, copy.textureOffset.x);
+        minY = std::min(minY, copy.textureOffset.y);
+        minZ = std::min(minZ, copy.textureOffset.z);
+        maxX = std::max(maxX, copy.textureOffset.x + copy.copySize.width);
+        maxY = std::max(maxY, copy.textureOffset.y + copy.copySize.height);
+        maxZ = std::max(maxZ, copy.textureOffset.z + copy.copySize.depthOrArrayLayers);
+    }
+
+    ASSERT_EQ(minX, textureSpec.x);
+    ASSERT_EQ(minY, textureSpec.y);
+    ASSERT_EQ(minZ, textureSpec.z);
+    ASSERT_EQ(maxX, textureSpec.x + textureSpec.width);
+    ASSERT_EQ(maxY, textureSpec.y + textureSpec.height);
+    ASSERT_EQ(maxZ, textureSpec.z + textureSpec.depthOrArrayLayers);
+}
+
+// Validate that the number of pixels copied is exactly equal to the number of pixels in the
+// texture region
+void ValidatePixelCount(const TextureSpec& textureSpec, const TextureCopySubresource& copySplit) {
+    uint32_t count = 0;
+    for (uint32_t i = 0; i < copySplit.count; ++i) {
+        const auto& copy = copySplit.copies[i];
+        uint32_t copiedPixels =
+            copy.copySize.width * copy.copySize.height * copy.copySize.depthOrArrayLayers;
+        ASSERT_GT(copiedPixels, 0u);
+        count += copiedPixels;
+    }
+    ASSERT_EQ(count, textureSpec.width * textureSpec.height * textureSpec.depthOrArrayLayers);
+}
+
+// Check that every buffer offset is at the correct pixel location
+void ValidateBufferOffset(const TextureSpec& textureSpec,
+                          const BufferSpec& bufferSpec,
+                          const TextureCopySubresource& copySplit,
+                          wgpu::TextureDimension dimension) {
+    ASSERT_GT(copySplit.count, 0u);
+
+    uint32_t texelsPerBlock = textureSpec.blockWidth * textureSpec.blockHeight;
+    for (uint32_t i = 0; i < copySplit.count; ++i) {
+        const auto& copy = copySplit.copies[i];
+
+        uint32_t bytesPerRowInTexels =
+            bufferSpec.bytesPerRow / textureSpec.texelBlockSizeInBytes * texelsPerBlock;
+        uint32_t slicePitchInTexels =
+            bytesPerRowInTexels * (bufferSpec.rowsPerImage / textureSpec.blockHeight);
+        uint32_t absoluteTexelOffset =
+            copy.alignedOffset / textureSpec.texelBlockSizeInBytes * texelsPerBlock +
+            copy.bufferOffset.x / textureSpec.blockWidth * texelsPerBlock +
+            copy.bufferOffset.y / textureSpec.blockHeight * bytesPerRowInTexels;
+
+        // There is one empty row at most in a 2D copy region. However, it is not true for
+        // a 3D texture copy region when we are copying the last row of each slice. We may
+        // need to offset a lot rows and copy.bufferOffset.y may be big.
+        if (dimension == wgpu::TextureDimension::e2D) {
+            ASSERT_LE(copy.bufferOffset.y, textureSpec.blockHeight);
         }
+        ASSERT_EQ(copy.bufferOffset.z, 0u);
 
-        // Check that no pair of copy regions intersect each other
-        void ValidateDisjoint(const TextureCopySubresource& copySplit) {
-            for (uint32_t i = 0; i < copySplit.count; ++i) {
-                const auto& a = copySplit.copies[i];
-                for (uint32_t j = i + 1; j < copySplit.count; ++j) {
-                    const auto& b = copySplit.copies[j];
-                    // If textureOffset.x is 0, and copySize.width is 2, we are copying pixel 0 and
-                    // 1. We never touch pixel 2 on x-axis. So the copied range on x-axis should be
-                    // [textureOffset.x, textureOffset.x + copySize.width - 1] and both ends are
-                    // included.
-                    bool overlapX = InclusiveRangesOverlap(
-                        a.textureOffset.x, a.textureOffset.x + a.copySize.width - 1,
-                        b.textureOffset.x, b.textureOffset.x + b.copySize.width - 1);
-                    bool overlapY = InclusiveRangesOverlap(
-                        a.textureOffset.y, a.textureOffset.y + a.copySize.height - 1,
-                        b.textureOffset.y, b.textureOffset.y + b.copySize.height - 1);
-                    bool overlapZ = InclusiveRangesOverlap(
-                        a.textureOffset.z, a.textureOffset.z + a.copySize.depthOrArrayLayers - 1,
-                        b.textureOffset.z, b.textureOffset.z + b.copySize.depthOrArrayLayers - 1);
-                    ASSERT_TRUE(!overlapX || !overlapY || !overlapZ);
-                }
-            }
-        }
+        ASSERT_GE(absoluteTexelOffset,
+                  bufferSpec.offset / textureSpec.texelBlockSizeInBytes * texelsPerBlock);
+        uint32_t relativeTexelOffset = absoluteTexelOffset - bufferSpec.offset /
+                                                                 textureSpec.texelBlockSizeInBytes *
+                                                                 texelsPerBlock;
 
-        // Check that the union of the copy regions exactly covers the texture region
-        void ValidateTextureBounds(const TextureSpec& textureSpec,
-                                   const TextureCopySubresource& copySplit) {
-            ASSERT_GT(copySplit.count, 0u);
+        uint32_t z = relativeTexelOffset / slicePitchInTexels;
+        uint32_t y = (relativeTexelOffset % slicePitchInTexels) / bytesPerRowInTexels;
+        uint32_t x = relativeTexelOffset % bytesPerRowInTexels;
 
-            uint32_t minX = copySplit.copies[0].textureOffset.x;
-            uint32_t minY = copySplit.copies[0].textureOffset.y;
-            uint32_t minZ = copySplit.copies[0].textureOffset.z;
-            uint32_t maxX =
-                copySplit.copies[0].textureOffset.x + copySplit.copies[0].copySize.width;
-            uint32_t maxY =
-                copySplit.copies[0].textureOffset.y + copySplit.copies[0].copySize.height;
-            uint32_t maxZ = copySplit.copies[0].textureOffset.z +
-                            copySplit.copies[0].copySize.depthOrArrayLayers;
+        ASSERT_EQ(copy.textureOffset.x - textureSpec.x, x);
+        ASSERT_EQ(copy.textureOffset.y - textureSpec.y, y);
+        ASSERT_EQ(copy.textureOffset.z - textureSpec.z, z);
+    }
+}
 
-            for (uint32_t i = 1; i < copySplit.count; ++i) {
-                const auto& copy = copySplit.copies[i];
-                minX = std::min(minX, copy.textureOffset.x);
-                minY = std::min(minY, copy.textureOffset.y);
-                minZ = std::min(minZ, copy.textureOffset.z);
-                maxX = std::max(maxX, copy.textureOffset.x + copy.copySize.width);
-                maxY = std::max(maxY, copy.textureOffset.y + copy.copySize.height);
-                maxZ = std::max(maxZ, copy.textureOffset.z + copy.copySize.depthOrArrayLayers);
-            }
+void ValidateCopySplit(const TextureSpec& textureSpec,
+                       const BufferSpec& bufferSpec,
+                       const TextureCopySubresource& copySplit,
+                       wgpu::TextureDimension dimension) {
+    ValidateFootprints(textureSpec, bufferSpec, copySplit, dimension);
+    ValidateOffset(copySplit);
+    ValidateDisjoint(copySplit);
+    ValidateTextureBounds(textureSpec, copySplit);
+    ValidatePixelCount(textureSpec, copySplit);
+    ValidateBufferOffset(textureSpec, bufferSpec, copySplit, dimension);
+}
 
-            ASSERT_EQ(minX, textureSpec.x);
-            ASSERT_EQ(minY, textureSpec.y);
-            ASSERT_EQ(minZ, textureSpec.z);
-            ASSERT_EQ(maxX, textureSpec.x + textureSpec.width);
-            ASSERT_EQ(maxY, textureSpec.y + textureSpec.height);
-            ASSERT_EQ(maxZ, textureSpec.z + textureSpec.depthOrArrayLayers);
-        }
+std::ostream& operator<<(std::ostream& os, const TextureSpec& textureSpec) {
+    os << "TextureSpec("
+       << "[(" << textureSpec.x << ", " << textureSpec.y << ", " << textureSpec.z << "), ("
+       << textureSpec.width << ", " << textureSpec.height << ", " << textureSpec.depthOrArrayLayers
+       << ")], " << textureSpec.texelBlockSizeInBytes << ")";
+    return os;
+}
 
-        // Validate that the number of pixels copied is exactly equal to the number of pixels in the
-        // texture region
-        void ValidatePixelCount(const TextureSpec& textureSpec,
-                                const TextureCopySubresource& copySplit) {
-            uint32_t count = 0;
-            for (uint32_t i = 0; i < copySplit.count; ++i) {
-                const auto& copy = copySplit.copies[i];
-                uint32_t copiedPixels =
-                    copy.copySize.width * copy.copySize.height * copy.copySize.depthOrArrayLayers;
-                ASSERT_GT(copiedPixels, 0u);
-                count += copiedPixels;
-            }
-            ASSERT_EQ(count,
-                      textureSpec.width * textureSpec.height * textureSpec.depthOrArrayLayers);
-        }
+std::ostream& operator<<(std::ostream& os, const BufferSpec& bufferSpec) {
+    os << "BufferSpec(" << bufferSpec.offset << ", " << bufferSpec.bytesPerRow << ", "
+       << bufferSpec.rowsPerImage << ")";
+    return os;
+}
 
-        // Check that every buffer offset is at the correct pixel location
-        void ValidateBufferOffset(const TextureSpec& textureSpec,
-                                  const BufferSpec& bufferSpec,
-                                  const TextureCopySubresource& copySplit,
-                                  wgpu::TextureDimension dimension) {
-            ASSERT_GT(copySplit.count, 0u);
+std::ostream& operator<<(std::ostream& os, const TextureCopySubresource& copySplit) {
+    os << "CopySplit" << std::endl;
+    for (uint32_t i = 0; i < copySplit.count; ++i) {
+        const auto& copy = copySplit.copies[i];
+        os << "  " << i << ": Texture at (" << copy.textureOffset.x << ", " << copy.textureOffset.y
+           << ", " << copy.textureOffset.z << "), size (" << copy.copySize.width << ", "
+           << copy.copySize.height << ", " << copy.copySize.depthOrArrayLayers << ")" << std::endl;
+        os << "  " << i << ": Buffer at (" << copy.bufferOffset.x << ", " << copy.bufferOffset.y
+           << ", " << copy.bufferOffset.z << "), footprint (" << copy.bufferSize.width << ", "
+           << copy.bufferSize.height << ", " << copy.bufferSize.depthOrArrayLayers << ")"
+           << std::endl;
+    }
+    return os;
+}
 
-            uint32_t texelsPerBlock = textureSpec.blockWidth * textureSpec.blockHeight;
-            for (uint32_t i = 0; i < copySplit.count; ++i) {
-                const auto& copy = copySplit.copies[i];
+// Define base texture sizes and offsets to test with: some aligned, some unaligned
+constexpr TextureSpec kBaseTextureSpecs[] = {
+    {0, 0, 0, 1, 1, 1, 4},
+    {0, 0, 0, 64, 1, 1, 4},
+    {0, 0, 0, 128, 1, 1, 4},
+    {0, 0, 0, 192, 1, 1, 4},
+    {31, 16, 0, 1, 1, 1, 4},
+    {64, 16, 0, 1, 1, 1, 4},
+    {64, 16, 8, 1, 1, 1, 4},
 
-                uint32_t bytesPerRowInTexels =
-                    bufferSpec.bytesPerRow / textureSpec.texelBlockSizeInBytes * texelsPerBlock;
-                uint32_t slicePitchInTexels =
-                    bytesPerRowInTexels * (bufferSpec.rowsPerImage / textureSpec.blockHeight);
-                uint32_t absoluteTexelOffset =
-                    copy.alignedOffset / textureSpec.texelBlockSizeInBytes * texelsPerBlock +
-                    copy.bufferOffset.x / textureSpec.blockWidth * texelsPerBlock +
-                    copy.bufferOffset.y / textureSpec.blockHeight * bytesPerRowInTexels;
+    {0, 0, 0, 64, 2, 1, 4},
+    {0, 0, 0, 64, 1, 2, 4},
+    {0, 0, 0, 64, 2, 2, 4},
+    {0, 0, 0, 128, 2, 1, 4},
+    {0, 0, 0, 128, 1, 2, 4},
+    {0, 0, 0, 128, 2, 2, 4},
+    {0, 0, 0, 192, 2, 1, 4},
+    {0, 0, 0, 192, 1, 2, 4},
+    {0, 0, 0, 192, 2, 2, 4},
 
-                // There is one empty row at most in a 2D copy region. However, it is not true for
-                // a 3D texture copy region when we are copying the last row of each slice. We may
-                // need to offset a lot rows and copy.bufferOffset.y may be big.
-                if (dimension == wgpu::TextureDimension::e2D) {
-                    ASSERT_LE(copy.bufferOffset.y, textureSpec.blockHeight);
-                }
-                ASSERT_EQ(copy.bufferOffset.z, 0u);
+    {0, 0, 0, 1024, 1024, 1, 4},
+    {256, 512, 0, 1024, 1024, 1, 4},
+    {64, 48, 0, 1024, 1024, 1, 4},
+    {64, 48, 16, 1024, 1024, 1024, 4},
 
-                ASSERT_GE(absoluteTexelOffset,
-                          bufferSpec.offset / textureSpec.texelBlockSizeInBytes * texelsPerBlock);
-                uint32_t relativeTexelOffset =
-                    absoluteTexelOffset -
-                    bufferSpec.offset / textureSpec.texelBlockSizeInBytes * texelsPerBlock;
+    {0, 0, 0, 257, 31, 1, 4},
+    {0, 0, 0, 17, 93, 1, 4},
+    {59, 13, 0, 257, 31, 1, 4},
+    {17, 73, 0, 17, 93, 1, 4},
+    {17, 73, 59, 17, 93, 99, 4},
 
-                uint32_t z = relativeTexelOffset / slicePitchInTexels;
-                uint32_t y = (relativeTexelOffset % slicePitchInTexels) / bytesPerRowInTexels;
-                uint32_t x = relativeTexelOffset % bytesPerRowInTexels;
+    {0, 0, 0, 4, 4, 1, 8, 4, 4},
+    {64, 16, 0, 4, 4, 1, 8, 4, 4},
+    {64, 16, 8, 4, 4, 1, 8, 4, 4},
+    {0, 0, 0, 4, 4, 1, 16, 4, 4},
+    {64, 16, 0, 4, 4, 1, 16, 4, 4},
+    {64, 16, 8, 4, 4, 1, 16, 4, 4},
 
-                ASSERT_EQ(copy.textureOffset.x - textureSpec.x, x);
-                ASSERT_EQ(copy.textureOffset.y - textureSpec.y, y);
-                ASSERT_EQ(copy.textureOffset.z - textureSpec.z, z);
-            }
-        }
+    {0, 0, 0, 1024, 1024, 1, 8, 4, 4},
+    {256, 512, 0, 1024, 1024, 1, 8, 4, 4},
+    {64, 48, 0, 1024, 1024, 1, 8, 4, 4},
+    {64, 48, 16, 1024, 1024, 1, 8, 4, 4},
+    {0, 0, 0, 1024, 1024, 1, 16, 4, 4},
+    {256, 512, 0, 1024, 1024, 1, 16, 4, 4},
+    {64, 48, 0, 1024, 1024, 1, 4, 16, 4},
+    {64, 48, 16, 1024, 1024, 1, 16, 4, 4},
+};
 
-        void ValidateCopySplit(const TextureSpec& textureSpec,
-                               const BufferSpec& bufferSpec,
-                               const TextureCopySubresource& copySplit,
-                               wgpu::TextureDimension dimension) {
-            ValidateFootprints(textureSpec, bufferSpec, copySplit, dimension);
-            ValidateOffset(copySplit);
-            ValidateDisjoint(copySplit);
-            ValidateTextureBounds(textureSpec, copySplit);
-            ValidatePixelCount(textureSpec, copySplit);
-            ValidateBufferOffset(textureSpec, bufferSpec, copySplit, dimension);
-        }
+// Define base buffer sizes to work with: some offsets aligned, some unaligned. bytesPerRow
+// is the minimum required
+std::array<BufferSpec, 15> BaseBufferSpecs(const TextureSpec& textureSpec) {
+    uint32_t bytesPerRow =
+        Align(textureSpec.texelBlockSizeInBytes * textureSpec.width, kTextureBytesPerRowAlignment);
 
-        std::ostream& operator<<(std::ostream& os, const TextureSpec& textureSpec) {
-            os << "TextureSpec("
-               << "[(" << textureSpec.x << ", " << textureSpec.y << ", " << textureSpec.z << "), ("
-               << textureSpec.width << ", " << textureSpec.height << ", "
-               << textureSpec.depthOrArrayLayers << ")], " << textureSpec.texelBlockSizeInBytes
-               << ")";
-            return os;
-        }
-
-        std::ostream& operator<<(std::ostream& os, const BufferSpec& bufferSpec) {
-            os << "BufferSpec(" << bufferSpec.offset << ", " << bufferSpec.bytesPerRow << ", "
-               << bufferSpec.rowsPerImage << ")";
-            return os;
-        }
-
-        std::ostream& operator<<(std::ostream& os, const TextureCopySubresource& copySplit) {
-            os << "CopySplit" << std::endl;
-            for (uint32_t i = 0; i < copySplit.count; ++i) {
-                const auto& copy = copySplit.copies[i];
-                os << "  " << i << ": Texture at (" << copy.textureOffset.x << ", "
-                   << copy.textureOffset.y << ", " << copy.textureOffset.z << "), size ("
-                   << copy.copySize.width << ", " << copy.copySize.height << ", "
-                   << copy.copySize.depthOrArrayLayers << ")" << std::endl;
-                os << "  " << i << ": Buffer at (" << copy.bufferOffset.x << ", "
-                   << copy.bufferOffset.y << ", " << copy.bufferOffset.z << "), footprint ("
-                   << copy.bufferSize.width << ", " << copy.bufferSize.height << ", "
-                   << copy.bufferSize.depthOrArrayLayers << ")" << std::endl;
-            }
-            return os;
-        }
-
-        // Define base texture sizes and offsets to test with: some aligned, some unaligned
-        constexpr TextureSpec kBaseTextureSpecs[] = {
-            {0, 0, 0, 1, 1, 1, 4},
-            {0, 0, 0, 64, 1, 1, 4},
-            {0, 0, 0, 128, 1, 1, 4},
-            {0, 0, 0, 192, 1, 1, 4},
-            {31, 16, 0, 1, 1, 1, 4},
-            {64, 16, 0, 1, 1, 1, 4},
-            {64, 16, 8, 1, 1, 1, 4},
-
-            {0, 0, 0, 64, 2, 1, 4},
-            {0, 0, 0, 64, 1, 2, 4},
-            {0, 0, 0, 64, 2, 2, 4},
-            {0, 0, 0, 128, 2, 1, 4},
-            {0, 0, 0, 128, 1, 2, 4},
-            {0, 0, 0, 128, 2, 2, 4},
-            {0, 0, 0, 192, 2, 1, 4},
-            {0, 0, 0, 192, 1, 2, 4},
-            {0, 0, 0, 192, 2, 2, 4},
-
-            {0, 0, 0, 1024, 1024, 1, 4},
-            {256, 512, 0, 1024, 1024, 1, 4},
-            {64, 48, 0, 1024, 1024, 1, 4},
-            {64, 48, 16, 1024, 1024, 1024, 4},
-
-            {0, 0, 0, 257, 31, 1, 4},
-            {0, 0, 0, 17, 93, 1, 4},
-            {59, 13, 0, 257, 31, 1, 4},
-            {17, 73, 0, 17, 93, 1, 4},
-            {17, 73, 59, 17, 93, 99, 4},
-
-            {0, 0, 0, 4, 4, 1, 8, 4, 4},
-            {64, 16, 0, 4, 4, 1, 8, 4, 4},
-            {64, 16, 8, 4, 4, 1, 8, 4, 4},
-            {0, 0, 0, 4, 4, 1, 16, 4, 4},
-            {64, 16, 0, 4, 4, 1, 16, 4, 4},
-            {64, 16, 8, 4, 4, 1, 16, 4, 4},
-
-            {0, 0, 0, 1024, 1024, 1, 8, 4, 4},
-            {256, 512, 0, 1024, 1024, 1, 8, 4, 4},
-            {64, 48, 0, 1024, 1024, 1, 8, 4, 4},
-            {64, 48, 16, 1024, 1024, 1, 8, 4, 4},
-            {0, 0, 0, 1024, 1024, 1, 16, 4, 4},
-            {256, 512, 0, 1024, 1024, 1, 16, 4, 4},
-            {64, 48, 0, 1024, 1024, 1, 4, 16, 4},
-            {64, 48, 16, 1024, 1024, 1, 16, 4, 4},
-        };
-
-        // Define base buffer sizes to work with: some offsets aligned, some unaligned. bytesPerRow
-        // is the minimum required
-        std::array<BufferSpec, 15> BaseBufferSpecs(const TextureSpec& textureSpec) {
-            uint32_t bytesPerRow = Align(textureSpec.texelBlockSizeInBytes * textureSpec.width,
-                                         kTextureBytesPerRowAlignment);
-
-            auto alignNonPow2 = [](uint32_t value, uint32_t size) -> uint32_t {
-                return value == 0 ? 0 : ((value - 1) / size + 1) * size;
-            };
-
-            return {
-                BufferSpec{alignNonPow2(0, textureSpec.texelBlockSizeInBytes), bytesPerRow,
-                           textureSpec.height},
-                BufferSpec{alignNonPow2(256, textureSpec.texelBlockSizeInBytes), bytesPerRow,
-                           textureSpec.height},
-                BufferSpec{alignNonPow2(512, textureSpec.texelBlockSizeInBytes), bytesPerRow,
-                           textureSpec.height},
-                BufferSpec{alignNonPow2(1024, textureSpec.texelBlockSizeInBytes), bytesPerRow,
-                           textureSpec.height},
-                BufferSpec{alignNonPow2(1024, textureSpec.texelBlockSizeInBytes), bytesPerRow,
-                           textureSpec.height * 2},
-
-                BufferSpec{alignNonPow2(32, textureSpec.texelBlockSizeInBytes), bytesPerRow,
-                           textureSpec.height},
-                BufferSpec{alignNonPow2(64, textureSpec.texelBlockSizeInBytes), bytesPerRow,
-                           textureSpec.height},
-                BufferSpec{alignNonPow2(64, textureSpec.texelBlockSizeInBytes), bytesPerRow,
-                           textureSpec.height * 2},
-
-                BufferSpec{alignNonPow2(31, textureSpec.texelBlockSizeInBytes), bytesPerRow,
-                           textureSpec.height},
-                BufferSpec{alignNonPow2(257, textureSpec.texelBlockSizeInBytes), bytesPerRow,
-                           textureSpec.height},
-                BufferSpec{alignNonPow2(384, textureSpec.texelBlockSizeInBytes), bytesPerRow,
-                           textureSpec.height},
-                BufferSpec{alignNonPow2(511, textureSpec.texelBlockSizeInBytes), bytesPerRow,
-                           textureSpec.height},
-                BufferSpec{alignNonPow2(513, textureSpec.texelBlockSizeInBytes), bytesPerRow,
-                           textureSpec.height},
-                BufferSpec{alignNonPow2(1023, textureSpec.texelBlockSizeInBytes), bytesPerRow,
-                           textureSpec.height},
-                BufferSpec{alignNonPow2(1023, textureSpec.texelBlockSizeInBytes), bytesPerRow,
-                           textureSpec.height * 2},
-            };
-        }
-
-        // Define a list of values to set properties in the spec structs
-        constexpr uint32_t kCheckValues[] = {
-            1,  2,  3,  4,   5,   6,   7,    8,     // small values
-            16, 32, 64, 128, 256, 512, 1024, 2048,  // powers of 2
-            15, 31, 63, 127, 257, 511, 1023, 2047,  // misalignments
-            17, 33, 65, 129, 257, 513, 1025, 2049};
-
-    }  // namespace
-
-    class CopySplitTest : public testing::TestWithParam<wgpu::TextureDimension> {
-      protected:
-        void DoTest(const TextureSpec& textureSpec, const BufferSpec& bufferSpec) {
-            ASSERT(textureSpec.width % textureSpec.blockWidth == 0 &&
-                   textureSpec.height % textureSpec.blockHeight == 0);
-
-            wgpu::TextureDimension dimension = GetParam();
-            TextureCopySubresource copySplit;
-            switch (dimension) {
-                case wgpu::TextureDimension::e2D: {
-                    copySplit = Compute2DTextureCopySubresource(
-                        {textureSpec.x, textureSpec.y, textureSpec.z},
-                        {textureSpec.width, textureSpec.height, textureSpec.depthOrArrayLayers},
-                        {textureSpec.texelBlockSizeInBytes, textureSpec.blockWidth,
-                         textureSpec.blockHeight},
-                        bufferSpec.offset, bufferSpec.bytesPerRow);
-                    break;
-                }
-                case wgpu::TextureDimension::e3D: {
-                    copySplit = Compute3DTextureCopySplits(
-                        {textureSpec.x, textureSpec.y, textureSpec.z},
-                        {textureSpec.width, textureSpec.height, textureSpec.depthOrArrayLayers},
-                        {textureSpec.texelBlockSizeInBytes, textureSpec.blockWidth,
-                         textureSpec.blockHeight},
-                        bufferSpec.offset, bufferSpec.bytesPerRow, bufferSpec.rowsPerImage);
-                    break;
-                }
-                default:
-                    UNREACHABLE();
-                    break;
-            }
-
-            ValidateCopySplit(textureSpec, bufferSpec, copySplit, dimension);
-
-            if (HasFatalFailure()) {
-                std::ostringstream message;
-                message << "Failed generating splits: " << textureSpec << ", " << bufferSpec
-                        << std::endl
-                        << dimension << " " << copySplit << std::endl;
-                FAIL() << message.str();
-            }
-        }
+    auto alignNonPow2 = [](uint32_t value, uint32_t size) -> uint32_t {
+        return value == 0 ? 0 : ((value - 1) / size + 1) * size;
     };
 
-    TEST_P(CopySplitTest, General) {
-        for (TextureSpec textureSpec : kBaseTextureSpecs) {
+    return {
+        BufferSpec{alignNonPow2(0, textureSpec.texelBlockSizeInBytes), bytesPerRow,
+                   textureSpec.height},
+        BufferSpec{alignNonPow2(256, textureSpec.texelBlockSizeInBytes), bytesPerRow,
+                   textureSpec.height},
+        BufferSpec{alignNonPow2(512, textureSpec.texelBlockSizeInBytes), bytesPerRow,
+                   textureSpec.height},
+        BufferSpec{alignNonPow2(1024, textureSpec.texelBlockSizeInBytes), bytesPerRow,
+                   textureSpec.height},
+        BufferSpec{alignNonPow2(1024, textureSpec.texelBlockSizeInBytes), bytesPerRow,
+                   textureSpec.height * 2},
+
+        BufferSpec{alignNonPow2(32, textureSpec.texelBlockSizeInBytes), bytesPerRow,
+                   textureSpec.height},
+        BufferSpec{alignNonPow2(64, textureSpec.texelBlockSizeInBytes), bytesPerRow,
+                   textureSpec.height},
+        BufferSpec{alignNonPow2(64, textureSpec.texelBlockSizeInBytes), bytesPerRow,
+                   textureSpec.height * 2},
+
+        BufferSpec{alignNonPow2(31, textureSpec.texelBlockSizeInBytes), bytesPerRow,
+                   textureSpec.height},
+        BufferSpec{alignNonPow2(257, textureSpec.texelBlockSizeInBytes), bytesPerRow,
+                   textureSpec.height},
+        BufferSpec{alignNonPow2(384, textureSpec.texelBlockSizeInBytes), bytesPerRow,
+                   textureSpec.height},
+        BufferSpec{alignNonPow2(511, textureSpec.texelBlockSizeInBytes), bytesPerRow,
+                   textureSpec.height},
+        BufferSpec{alignNonPow2(513, textureSpec.texelBlockSizeInBytes), bytesPerRow,
+                   textureSpec.height},
+        BufferSpec{alignNonPow2(1023, textureSpec.texelBlockSizeInBytes), bytesPerRow,
+                   textureSpec.height},
+        BufferSpec{alignNonPow2(1023, textureSpec.texelBlockSizeInBytes), bytesPerRow,
+                   textureSpec.height * 2},
+    };
+}
+
+// Define a list of values to set properties in the spec structs
+constexpr uint32_t kCheckValues[] = {1,  2,  3,  4,   5,   6,   7,    8,     // small values
+                                     16, 32, 64, 128, 256, 512, 1024, 2048,  // powers of 2
+                                     15, 31, 63, 127, 257, 511, 1023, 2047,  // misalignments
+                                     17, 33, 65, 129, 257, 513, 1025, 2049};
+
+}  // namespace
+
+class CopySplitTest : public testing::TestWithParam<wgpu::TextureDimension> {
+  protected:
+    void DoTest(const TextureSpec& textureSpec, const BufferSpec& bufferSpec) {
+        ASSERT(textureSpec.width % textureSpec.blockWidth == 0 &&
+               textureSpec.height % textureSpec.blockHeight == 0);
+
+        wgpu::TextureDimension dimension = GetParam();
+        TextureCopySubresource copySplit;
+        switch (dimension) {
+            case wgpu::TextureDimension::e2D: {
+                copySplit = Compute2DTextureCopySubresource(
+                    {textureSpec.x, textureSpec.y, textureSpec.z},
+                    {textureSpec.width, textureSpec.height, textureSpec.depthOrArrayLayers},
+                    {textureSpec.texelBlockSizeInBytes, textureSpec.blockWidth,
+                     textureSpec.blockHeight},
+                    bufferSpec.offset, bufferSpec.bytesPerRow);
+                break;
+            }
+            case wgpu::TextureDimension::e3D: {
+                copySplit = Compute3DTextureCopySplits(
+                    {textureSpec.x, textureSpec.y, textureSpec.z},
+                    {textureSpec.width, textureSpec.height, textureSpec.depthOrArrayLayers},
+                    {textureSpec.texelBlockSizeInBytes, textureSpec.blockWidth,
+                     textureSpec.blockHeight},
+                    bufferSpec.offset, bufferSpec.bytesPerRow, bufferSpec.rowsPerImage);
+                break;
+            }
+            default:
+                UNREACHABLE();
+                break;
+        }
+
+        ValidateCopySplit(textureSpec, bufferSpec, copySplit, dimension);
+
+        if (HasFatalFailure()) {
+            std::ostringstream message;
+            message << "Failed generating splits: " << textureSpec << ", " << bufferSpec
+                    << std::endl
+                    << dimension << " " << copySplit << std::endl;
+            FAIL() << message.str();
+        }
+    }
+};
+
+TEST_P(CopySplitTest, General) {
+    for (TextureSpec textureSpec : kBaseTextureSpecs) {
+        for (BufferSpec bufferSpec : BaseBufferSpecs(textureSpec)) {
+            DoTest(textureSpec, bufferSpec);
+        }
+    }
+}
+
+TEST_P(CopySplitTest, TextureWidth) {
+    for (TextureSpec textureSpec : kBaseTextureSpecs) {
+        for (uint32_t val : kCheckValues) {
+            if (val % textureSpec.blockWidth != 0) {
+                continue;
+            }
+            textureSpec.width = val;
             for (BufferSpec bufferSpec : BaseBufferSpecs(textureSpec)) {
                 DoTest(textureSpec, bufferSpec);
             }
         }
     }
+}
 
-    TEST_P(CopySplitTest, TextureWidth) {
-        for (TextureSpec textureSpec : kBaseTextureSpecs) {
-            for (uint32_t val : kCheckValues) {
-                if (val % textureSpec.blockWidth != 0) {
-                    continue;
-                }
-                textureSpec.width = val;
-                for (BufferSpec bufferSpec : BaseBufferSpecs(textureSpec)) {
-                    DoTest(textureSpec, bufferSpec);
-                }
+TEST_P(CopySplitTest, TextureHeight) {
+    for (TextureSpec textureSpec : kBaseTextureSpecs) {
+        for (uint32_t val : kCheckValues) {
+            if (val % textureSpec.blockHeight != 0) {
+                continue;
             }
-        }
-    }
-
-    TEST_P(CopySplitTest, TextureHeight) {
-        for (TextureSpec textureSpec : kBaseTextureSpecs) {
-            for (uint32_t val : kCheckValues) {
-                if (val % textureSpec.blockHeight != 0) {
-                    continue;
-                }
-                textureSpec.height = val;
-                for (BufferSpec bufferSpec : BaseBufferSpecs(textureSpec)) {
-                    DoTest(textureSpec, bufferSpec);
-                }
-            }
-        }
-    }
-
-    TEST_P(CopySplitTest, TextureX) {
-        for (TextureSpec textureSpec : kBaseTextureSpecs) {
-            for (uint32_t val : kCheckValues) {
-                textureSpec.x = val;
-                for (BufferSpec bufferSpec : BaseBufferSpecs(textureSpec)) {
-                    DoTest(textureSpec, bufferSpec);
-                }
-            }
-        }
-    }
-
-    TEST_P(CopySplitTest, TextureY) {
-        for (TextureSpec textureSpec : kBaseTextureSpecs) {
-            for (uint32_t val : kCheckValues) {
-                textureSpec.y = val;
-                for (BufferSpec bufferSpec : BaseBufferSpecs(textureSpec)) {
-                    DoTest(textureSpec, bufferSpec);
-                }
-            }
-        }
-    }
-
-    TEST_P(CopySplitTest, TexelSize) {
-        for (TextureSpec textureSpec : kBaseTextureSpecs) {
-            for (uint32_t texelSize : {4, 8, 16, 32, 64}) {
-                textureSpec.texelBlockSizeInBytes = texelSize;
-                for (BufferSpec bufferSpec : BaseBufferSpecs(textureSpec)) {
-                    DoTest(textureSpec, bufferSpec);
-                }
-            }
-        }
-    }
-
-    TEST_P(CopySplitTest, BufferOffset) {
-        for (TextureSpec textureSpec : kBaseTextureSpecs) {
+            textureSpec.height = val;
             for (BufferSpec bufferSpec : BaseBufferSpecs(textureSpec)) {
-                for (uint32_t val : kCheckValues) {
-                    bufferSpec.offset = textureSpec.texelBlockSizeInBytes * val;
-
-                    DoTest(textureSpec, bufferSpec);
-                }
+                DoTest(textureSpec, bufferSpec);
             }
         }
     }
+}
 
-    TEST_P(CopySplitTest, RowPitch) {
-        for (TextureSpec textureSpec : kBaseTextureSpecs) {
+TEST_P(CopySplitTest, TextureX) {
+    for (TextureSpec textureSpec : kBaseTextureSpecs) {
+        for (uint32_t val : kCheckValues) {
+            textureSpec.x = val;
             for (BufferSpec bufferSpec : BaseBufferSpecs(textureSpec)) {
-                uint32_t baseRowPitch = bufferSpec.bytesPerRow;
-                for (uint32_t i = 0; i < 5; ++i) {
-                    bufferSpec.bytesPerRow = baseRowPitch + i * 256;
-
-                    DoTest(textureSpec, bufferSpec);
-                }
+                DoTest(textureSpec, bufferSpec);
             }
         }
     }
+}
 
-    TEST_P(CopySplitTest, ImageHeight) {
-        for (TextureSpec textureSpec : kBaseTextureSpecs) {
+TEST_P(CopySplitTest, TextureY) {
+    for (TextureSpec textureSpec : kBaseTextureSpecs) {
+        for (uint32_t val : kCheckValues) {
+            textureSpec.y = val;
             for (BufferSpec bufferSpec : BaseBufferSpecs(textureSpec)) {
-                uint32_t baseImageHeight = bufferSpec.rowsPerImage;
-                for (uint32_t i = 0; i < 5; ++i) {
-                    bufferSpec.rowsPerImage = baseImageHeight + i * 256;
-
-                    DoTest(textureSpec, bufferSpec);
-                }
+                DoTest(textureSpec, bufferSpec);
             }
         }
     }
+}
 
-    INSTANTIATE_TEST_SUITE_P(,
-                             CopySplitTest,
-                             testing::Values(wgpu::TextureDimension::e2D,
-                                             wgpu::TextureDimension::e3D));
+TEST_P(CopySplitTest, TexelSize) {
+    for (TextureSpec textureSpec : kBaseTextureSpecs) {
+        for (uint32_t texelSize : {4, 8, 16, 32, 64}) {
+            textureSpec.texelBlockSizeInBytes = texelSize;
+            for (BufferSpec bufferSpec : BaseBufferSpecs(textureSpec)) {
+                DoTest(textureSpec, bufferSpec);
+            }
+        }
+    }
+}
+
+TEST_P(CopySplitTest, BufferOffset) {
+    for (TextureSpec textureSpec : kBaseTextureSpecs) {
+        for (BufferSpec bufferSpec : BaseBufferSpecs(textureSpec)) {
+            for (uint32_t val : kCheckValues) {
+                bufferSpec.offset = textureSpec.texelBlockSizeInBytes * val;
+
+                DoTest(textureSpec, bufferSpec);
+            }
+        }
+    }
+}
+
+TEST_P(CopySplitTest, RowPitch) {
+    for (TextureSpec textureSpec : kBaseTextureSpecs) {
+        for (BufferSpec bufferSpec : BaseBufferSpecs(textureSpec)) {
+            uint32_t baseRowPitch = bufferSpec.bytesPerRow;
+            for (uint32_t i = 0; i < 5; ++i) {
+                bufferSpec.bytesPerRow = baseRowPitch + i * 256;
+
+                DoTest(textureSpec, bufferSpec);
+            }
+        }
+    }
+}
+
+TEST_P(CopySplitTest, ImageHeight) {
+    for (TextureSpec textureSpec : kBaseTextureSpecs) {
+        for (BufferSpec bufferSpec : BaseBufferSpecs(textureSpec)) {
+            uint32_t baseImageHeight = bufferSpec.rowsPerImage;
+            for (uint32_t i = 0; i < 5; ++i) {
+                bufferSpec.rowsPerImage = baseImageHeight + i * 256;
+
+                DoTest(textureSpec, bufferSpec);
+            }
+        }
+    }
+}
+
+INSTANTIATE_TEST_SUITE_P(,
+                         CopySplitTest,
+                         testing::Values(wgpu::TextureDimension::e2D, wgpu::TextureDimension::e3D));
 
 }  // namespace dawn::native::d3d12
diff --git a/src/dawn/tests/unittests/native/CacheKeyTests.cpp b/src/dawn/tests/unittests/native/CacheKeyTests.cpp
index 42cd3e4..abd1acc 100644
--- a/src/dawn/tests/unittests/native/CacheKeyTests.cpp
+++ b/src/dawn/tests/unittests/native/CacheKeyTests.cpp
@@ -23,162 +23,161 @@
 
 namespace dawn::native {
 
-    // Testing classes with mock serializing implemented for testing.
-    class A {
-      public:
-        MOCK_METHOD(void, SerializeMock, (CacheKey*, const A&), (const));
-    };
-    template <>
-    void CacheKeySerializer<A>::Serialize(CacheKey* key, const A& t) {
-        t.SerializeMock(key, t);
+// Testing classes with mock serializing implemented for testing.
+class A {
+  public:
+    MOCK_METHOD(void, SerializeMock, (CacheKey*, const A&), (const));
+};
+template <>
+void CacheKeySerializer<A>::Serialize(CacheKey* key, const A& t) {
+    t.SerializeMock(key, t);
+}
+
+// Custom printer for CacheKey for clearer debug testing messages.
+void PrintTo(const CacheKey& key, std::ostream* stream) {
+    *stream << std::hex;
+    for (const int b : key) {
+        *stream << std::setfill('0') << std::setw(2) << b << " ";
+    }
+    *stream << std::dec;
+}
+
+namespace {
+
+using ::testing::InSequence;
+using ::testing::NotNull;
+using ::testing::PrintToString;
+using ::testing::Ref;
+
+// Matcher to compare CacheKeys for easier testing.
+MATCHER_P(CacheKeyEq, key, PrintToString(key)) {
+    return arg.size() == key.size() && memcmp(arg.data(), key.data(), key.size()) == 0;
+}
+
+TEST(CacheKeyTests, RecordSingleMember) {
+    CacheKey key;
+
+    A a;
+    EXPECT_CALL(a, SerializeMock(NotNull(), Ref(a))).Times(1);
+    EXPECT_THAT(key.Record(a), CacheKeyEq(CacheKey()));
+}
+
+TEST(CacheKeyTests, RecordManyMembers) {
+    constexpr size_t kNumMembers = 100;
+
+    CacheKey key;
+    for (size_t i = 0; i < kNumMembers; ++i) {
+        A a;
+        EXPECT_CALL(a, SerializeMock(NotNull(), Ref(a))).Times(1);
+        key.Record(a);
+    }
+    EXPECT_THAT(key, CacheKeyEq(CacheKey()));
+}
+
+TEST(CacheKeyTests, RecordIterable) {
+    constexpr size_t kIterableSize = 100;
+
+    // Expecting the size of the container.
+    CacheKey expected;
+    expected.Record(kIterableSize);
+
+    std::vector<A> iterable(kIterableSize);
+    {
+        InSequence seq;
+        for (const auto& a : iterable) {
+            EXPECT_CALL(a, SerializeMock(NotNull(), Ref(a))).Times(1);
+        }
+        for (const auto& a : iterable) {
+            EXPECT_CALL(a, SerializeMock(NotNull(), Ref(a))).Times(1);
+        }
     }
 
-    // Custom printer for CacheKey for clearer debug testing messages.
-    void PrintTo(const CacheKey& key, std::ostream* stream) {
-        *stream << std::hex;
-        for (const int b : key) {
-            *stream << std::setfill('0') << std::setw(2) << b << " ";
-        }
-        *stream << std::dec;
+    EXPECT_THAT(CacheKey().RecordIterable(iterable), CacheKeyEq(expected));
+    EXPECT_THAT(CacheKey().RecordIterable(iterable.data(), kIterableSize), CacheKeyEq(expected));
+}
+
+TEST(CacheKeyTests, RecordNested) {
+    CacheKey expected;
+    CacheKey actual;
+    {
+        // Recording a single member.
+        A a;
+        EXPECT_CALL(a, SerializeMock(NotNull(), Ref(a))).Times(1);
+        actual.Record(CacheKey().Record(a));
     }
-
-    namespace {
-
-        using ::testing::InSequence;
-        using ::testing::NotNull;
-        using ::testing::PrintToString;
-        using ::testing::Ref;
-
-        // Matcher to compare CacheKeys for easier testing.
-        MATCHER_P(CacheKeyEq, key, PrintToString(key)) {
-            return arg.size() == key.size() && memcmp(arg.data(), key.data(), key.size()) == 0;
-        }
-
-        TEST(CacheKeyTests, RecordSingleMember) {
-            CacheKey key;
-
+    {
+        // Recording multiple members.
+        constexpr size_t kNumMembers = 2;
+        CacheKey sub;
+        for (size_t i = 0; i < kNumMembers; ++i) {
             A a;
             EXPECT_CALL(a, SerializeMock(NotNull(), Ref(a))).Times(1);
-            EXPECT_THAT(key.Record(a), CacheKeyEq(CacheKey()));
+            sub.Record(a);
         }
-
-        TEST(CacheKeyTests, RecordManyMembers) {
-            constexpr size_t kNumMembers = 100;
-
-            CacheKey key;
-            for (size_t i = 0; i < kNumMembers; ++i) {
-                A a;
+        actual.Record(sub);
+    }
+    {
+        // Record an iterable.
+        constexpr size_t kIterableSize = 2;
+        expected.Record(kIterableSize);
+        std::vector<A> iterable(kIterableSize);
+        {
+            InSequence seq;
+            for (const auto& a : iterable) {
                 EXPECT_CALL(a, SerializeMock(NotNull(), Ref(a))).Times(1);
-                key.Record(a);
             }
-            EXPECT_THAT(key, CacheKeyEq(CacheKey()));
         }
+        actual.Record(CacheKey().RecordIterable(iterable));
+    }
+    EXPECT_THAT(actual, CacheKeyEq(expected));
+}
 
-        TEST(CacheKeyTests, RecordIterable) {
-            constexpr size_t kIterableSize = 100;
+TEST(CacheKeySerializerTests, IntegralTypes) {
+    // Only testing explicitly sized types for simplicity, and using 0s for larger types to
+    // avoid dealing with endianess.
+    EXPECT_THAT(CacheKey().Record('c'), CacheKeyEq(CacheKey({'c'})));
+    EXPECT_THAT(CacheKey().Record(uint8_t(255)), CacheKeyEq(CacheKey({255})));
+    EXPECT_THAT(CacheKey().Record(uint16_t(0)), CacheKeyEq(CacheKey({0, 0})));
+    EXPECT_THAT(CacheKey().Record(uint32_t(0)), CacheKeyEq(CacheKey({0, 0, 0, 0})));
+}
 
-            // Expecting the size of the container.
-            CacheKey expected;
-            expected.Record(kIterableSize);
+TEST(CacheKeySerializerTests, FloatingTypes) {
+    // Using 0s to avoid dealing with implementation specific float details.
+    EXPECT_THAT(CacheKey().Record(float{0}), CacheKeyEq(CacheKey(sizeof(float), 0)));
+    EXPECT_THAT(CacheKey().Record(double{0}), CacheKeyEq(CacheKey(sizeof(double), 0)));
+}
 
-            std::vector<A> iterable(kIterableSize);
-            {
-                InSequence seq;
-                for (const auto& a : iterable) {
-                    EXPECT_CALL(a, SerializeMock(NotNull(), Ref(a))).Times(1);
-                }
-                for (const auto& a : iterable) {
-                    EXPECT_CALL(a, SerializeMock(NotNull(), Ref(a))).Times(1);
-                }
-            }
+TEST(CacheKeySerializerTests, LiteralStrings) {
+    // Using a std::string here to help with creating the expected result.
+    std::string str = "string";
 
-            EXPECT_THAT(CacheKey().RecordIterable(iterable), CacheKeyEq(expected));
-            EXPECT_THAT(CacheKey().RecordIterable(iterable.data(), kIterableSize),
-                        CacheKeyEq(expected));
-        }
+    CacheKey expected;
+    expected.Record(size_t(7));
+    expected.insert(expected.end(), str.begin(), str.end());
+    expected.push_back('\0');
 
-        TEST(CacheKeyTests, RecordNested) {
-            CacheKey expected;
-            CacheKey actual;
-            {
-                // Recording a single member.
-                A a;
-                EXPECT_CALL(a, SerializeMock(NotNull(), Ref(a))).Times(1);
-                actual.Record(CacheKey().Record(a));
-            }
-            {
-                // Recording multiple members.
-                constexpr size_t kNumMembers = 2;
-                CacheKey sub;
-                for (size_t i = 0; i < kNumMembers; ++i) {
-                    A a;
-                    EXPECT_CALL(a, SerializeMock(NotNull(), Ref(a))).Times(1);
-                    sub.Record(a);
-                }
-                actual.Record(sub);
-            }
-            {
-                // Record an iterable.
-                constexpr size_t kIterableSize = 2;
-                expected.Record(kIterableSize);
-                std::vector<A> iterable(kIterableSize);
-                {
-                    InSequence seq;
-                    for (const auto& a : iterable) {
-                        EXPECT_CALL(a, SerializeMock(NotNull(), Ref(a))).Times(1);
-                    }
-                }
-                actual.Record(CacheKey().RecordIterable(iterable));
-            }
-            EXPECT_THAT(actual, CacheKeyEq(expected));
-        }
+    EXPECT_THAT(CacheKey().Record("string"), CacheKeyEq(expected));
+}
 
-        TEST(CacheKeySerializerTests, IntegralTypes) {
-            // Only testing explicitly sized types for simplicity, and using 0s for larger types to
-            // avoid dealing with endianess.
-            EXPECT_THAT(CacheKey().Record('c'), CacheKeyEq(CacheKey({'c'})));
-            EXPECT_THAT(CacheKey().Record(uint8_t(255)), CacheKeyEq(CacheKey({255})));
-            EXPECT_THAT(CacheKey().Record(uint16_t(0)), CacheKeyEq(CacheKey({0, 0})));
-            EXPECT_THAT(CacheKey().Record(uint32_t(0)), CacheKeyEq(CacheKey({0, 0, 0, 0})));
-        }
+TEST(CacheKeySerializerTests, StdStrings) {
+    std::string str = "string";
 
-        TEST(CacheKeySerializerTests, FloatingTypes) {
-            // Using 0s to avoid dealing with implementation specific float details.
-            EXPECT_THAT(CacheKey().Record(float{0}), CacheKeyEq(CacheKey(sizeof(float), 0)));
-            EXPECT_THAT(CacheKey().Record(double{0}), CacheKeyEq(CacheKey(sizeof(double), 0)));
-        }
+    CacheKey expected;
+    expected.Record((size_t)6);
+    expected.insert(expected.end(), str.begin(), str.end());
 
-        TEST(CacheKeySerializerTests, LiteralStrings) {
-            // Using a std::string here to help with creating the expected result.
-            std::string str = "string";
+    EXPECT_THAT(CacheKey().Record(str), CacheKeyEq(expected));
+}
 
-            CacheKey expected;
-            expected.Record(size_t(7));
-            expected.insert(expected.end(), str.begin(), str.end());
-            expected.push_back('\0');
+TEST(CacheKeySerializerTests, CacheKeys) {
+    CacheKey data = {'d', 'a', 't', 'a'};
 
-            EXPECT_THAT(CacheKey().Record("string"), CacheKeyEq(expected));
-        }
+    CacheKey expected;
+    expected.insert(expected.end(), data.begin(), data.end());
 
-        TEST(CacheKeySerializerTests, StdStrings) {
-            std::string str = "string";
+    EXPECT_THAT(CacheKey().Record(data), CacheKeyEq(expected));
+}
 
-            CacheKey expected;
-            expected.Record((size_t)6);
-            expected.insert(expected.end(), str.begin(), str.end());
-
-            EXPECT_THAT(CacheKey().Record(str), CacheKeyEq(expected));
-        }
-
-        TEST(CacheKeySerializerTests, CacheKeys) {
-            CacheKey data = {'d', 'a', 't', 'a'};
-
-            CacheKey expected;
-            expected.insert(expected.end(), data.begin(), data.end());
-
-            EXPECT_THAT(CacheKey().Record(data), CacheKeyEq(expected));
-        }
-
-    }  // namespace
+}  // namespace
 
 }  // namespace dawn::native
diff --git a/src/dawn/tests/unittests/native/CommandBufferEncodingTests.cpp b/src/dawn/tests/unittests/native/CommandBufferEncodingTests.cpp
index 6a6f19f..cf76918 100644
--- a/src/dawn/tests/unittests/native/CommandBufferEncodingTests.cpp
+++ b/src/dawn/tests/unittests/native/CommandBufferEncodingTests.cpp
@@ -23,293 +23,290 @@
 
 namespace dawn::native {
 
-    class CommandBufferEncodingTests : public DawnNativeTest {
-      protected:
-        void ExpectCommands(
-            dawn::native::CommandIterator* commands,
-            std::vector<std::pair<dawn::native::Command,
-                                  std::function<void(dawn::native::CommandIterator*)>>>
-                expectedCommands) {
-            dawn::native::Command commandId;
-            for (uint32_t commandIndex = 0; commands->NextCommandId(&commandId); ++commandIndex) {
-                ASSERT_LT(commandIndex, expectedCommands.size()) << "Unexpected command";
-                ASSERT_EQ(commandId, expectedCommands[commandIndex].first)
-                    << "at command " << commandIndex;
-                expectedCommands[commandIndex].second(commands);
+class CommandBufferEncodingTests : public DawnNativeTest {
+  protected:
+    void ExpectCommands(dawn::native::CommandIterator* commands,
+                        std::vector<std::pair<dawn::native::Command,
+                                              std::function<void(dawn::native::CommandIterator*)>>>
+                            expectedCommands) {
+        dawn::native::Command commandId;
+        for (uint32_t commandIndex = 0; commands->NextCommandId(&commandId); ++commandIndex) {
+            ASSERT_LT(commandIndex, expectedCommands.size()) << "Unexpected command";
+            ASSERT_EQ(commandId, expectedCommands[commandIndex].first)
+                << "at command " << commandIndex;
+            expectedCommands[commandIndex].second(commands);
+        }
+    }
+};
+
+// Indirect dispatch validation changes the bind groups in the middle
+// of a pass. Test that bindings are restored after the validation runs.
+TEST_F(CommandBufferEncodingTests, ComputePassEncoderIndirectDispatchStateRestoration) {
+    wgpu::BindGroupLayout staticLayout =
+        utils::MakeBindGroupLayout(device, {{
+                                               0,
+                                               wgpu::ShaderStage::Compute,
+                                               wgpu::BufferBindingType::Uniform,
+                                           }});
+
+    wgpu::BindGroupLayout dynamicLayout =
+        utils::MakeBindGroupLayout(device, {{
+                                               0,
+                                               wgpu::ShaderStage::Compute,
+                                               wgpu::BufferBindingType::Uniform,
+                                               true,
+                                           }});
+
+    // Create a simple pipeline
+    wgpu::ComputePipelineDescriptor csDesc;
+    csDesc.compute.module = utils::CreateShaderModule(device, R"(
+        @stage(compute) @workgroup_size(1, 1, 1)
+        fn main() {
+        })");
+    csDesc.compute.entryPoint = "main";
+
+    wgpu::PipelineLayout pl0 = utils::MakePipelineLayout(device, {staticLayout, dynamicLayout});
+    csDesc.layout = pl0;
+    wgpu::ComputePipeline pipeline0 = device.CreateComputePipeline(&csDesc);
+
+    wgpu::PipelineLayout pl1 = utils::MakePipelineLayout(device, {dynamicLayout, staticLayout});
+    csDesc.layout = pl1;
+    wgpu::ComputePipeline pipeline1 = device.CreateComputePipeline(&csDesc);
+
+    // Create buffers to use for both the indirect buffer and the bind groups.
+    wgpu::Buffer indirectBuffer =
+        utils::CreateBufferFromData<uint32_t>(device, wgpu::BufferUsage::Indirect, {1, 2, 3, 4});
+
+    wgpu::BufferDescriptor uniformBufferDesc = {};
+    uniformBufferDesc.size = 512;
+    uniformBufferDesc.usage = wgpu::BufferUsage::Uniform;
+    wgpu::Buffer uniformBuffer = device.CreateBuffer(&uniformBufferDesc);
+
+    wgpu::BindGroup staticBG = utils::MakeBindGroup(device, staticLayout, {{0, uniformBuffer}});
+
+    wgpu::BindGroup dynamicBG =
+        utils::MakeBindGroup(device, dynamicLayout, {{0, uniformBuffer, 0, 256}});
+
+    uint32_t dynamicOffset = 256;
+    std::vector<uint32_t> emptyDynamicOffsets = {};
+    std::vector<uint32_t> singleDynamicOffset = {dynamicOffset};
+
+    // Begin encoding commands.
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+    wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
+
+    CommandBufferStateTracker* stateTracker =
+        FromAPI(pass.Get())->GetCommandBufferStateTrackerForTesting();
+
+    // Perform a dispatch indirect which will be preceded by a validation dispatch.
+    pass.SetPipeline(pipeline0);
+    pass.SetBindGroup(0, staticBG);
+    pass.SetBindGroup(1, dynamicBG, 1, &dynamicOffset);
+    EXPECT_EQ(ToAPI(stateTracker->GetComputePipeline()), pipeline0.Get());
+
+    pass.DispatchWorkgroupsIndirect(indirectBuffer, 0);
+
+    // Expect restored state.
+    EXPECT_EQ(ToAPI(stateTracker->GetComputePipeline()), pipeline0.Get());
+    EXPECT_EQ(ToAPI(stateTracker->GetPipelineLayout()), pl0.Get());
+    EXPECT_EQ(ToAPI(stateTracker->GetBindGroup(BindGroupIndex(0))), staticBG.Get());
+    EXPECT_EQ(stateTracker->GetDynamicOffsets(BindGroupIndex(0)), emptyDynamicOffsets);
+    EXPECT_EQ(ToAPI(stateTracker->GetBindGroup(BindGroupIndex(1))), dynamicBG.Get());
+    EXPECT_EQ(stateTracker->GetDynamicOffsets(BindGroupIndex(1)), singleDynamicOffset);
+
+    // Dispatch again to check that the restored state can be used.
+    // Also pass an indirect offset which should get replaced with the offset
+    // into the scratch indirect buffer (0).
+    pass.DispatchWorkgroupsIndirect(indirectBuffer, 4);
+
+    // Expect restored state.
+    EXPECT_EQ(ToAPI(stateTracker->GetComputePipeline()), pipeline0.Get());
+    EXPECT_EQ(ToAPI(stateTracker->GetPipelineLayout()), pl0.Get());
+    EXPECT_EQ(ToAPI(stateTracker->GetBindGroup(BindGroupIndex(0))), staticBG.Get());
+    EXPECT_EQ(stateTracker->GetDynamicOffsets(BindGroupIndex(0)), emptyDynamicOffsets);
+    EXPECT_EQ(ToAPI(stateTracker->GetBindGroup(BindGroupIndex(1))), dynamicBG.Get());
+    EXPECT_EQ(stateTracker->GetDynamicOffsets(BindGroupIndex(1)), singleDynamicOffset);
+
+    // Change the pipeline
+    pass.SetPipeline(pipeline1);
+    pass.SetBindGroup(0, dynamicBG, 1, &dynamicOffset);
+    pass.SetBindGroup(1, staticBG);
+    EXPECT_EQ(ToAPI(stateTracker->GetComputePipeline()), pipeline1.Get());
+    EXPECT_EQ(ToAPI(stateTracker->GetPipelineLayout()), pl1.Get());
+
+    pass.DispatchWorkgroupsIndirect(indirectBuffer, 0);
+
+    // Expect restored state.
+    EXPECT_EQ(ToAPI(stateTracker->GetComputePipeline()), pipeline1.Get());
+    EXPECT_EQ(ToAPI(stateTracker->GetPipelineLayout()), pl1.Get());
+    EXPECT_EQ(ToAPI(stateTracker->GetBindGroup(BindGroupIndex(0))), dynamicBG.Get());
+    EXPECT_EQ(stateTracker->GetDynamicOffsets(BindGroupIndex(0)), singleDynamicOffset);
+    EXPECT_EQ(ToAPI(stateTracker->GetBindGroup(BindGroupIndex(1))), staticBG.Get());
+    EXPECT_EQ(stateTracker->GetDynamicOffsets(BindGroupIndex(1)), emptyDynamicOffsets);
+
+    pass.End();
+
+    wgpu::CommandBuffer commandBuffer = encoder.Finish();
+
+    auto ExpectSetPipeline = [](wgpu::ComputePipeline pipeline) {
+        return [pipeline](CommandIterator* commands) {
+            auto* cmd = commands->NextCommand<SetComputePipelineCmd>();
+            EXPECT_EQ(ToAPI(cmd->pipeline.Get()), pipeline.Get());
+        };
+    };
+
+    auto ExpectSetBindGroup = [](uint32_t index, wgpu::BindGroup bg,
+                                 std::vector<uint32_t> offsets = {}) {
+        return [index, bg, offsets](CommandIterator* commands) {
+            auto* cmd = commands->NextCommand<SetBindGroupCmd>();
+            uint32_t* dynamicOffsets = nullptr;
+            if (cmd->dynamicOffsetCount > 0) {
+                dynamicOffsets = commands->NextData<uint32_t>(cmd->dynamicOffsetCount);
             }
+
+            ASSERT_EQ(cmd->index, BindGroupIndex(index));
+            ASSERT_EQ(ToAPI(cmd->group.Get()), bg.Get());
+            ASSERT_EQ(cmd->dynamicOffsetCount, offsets.size());
+            for (uint32_t i = 0; i < cmd->dynamicOffsetCount; ++i) {
+                ASSERT_EQ(dynamicOffsets[i], offsets[i]);
+            }
+        };
+    };
+
+    // Initialize as null. Once we know the pointer, we'll check
+    // that it's the same buffer every time.
+    WGPUBuffer indirectScratchBuffer = nullptr;
+    auto ExpectDispatchIndirect = [&](CommandIterator* commands) {
+        auto* cmd = commands->NextCommand<DispatchIndirectCmd>();
+        if (indirectScratchBuffer == nullptr) {
+            indirectScratchBuffer = ToAPI(cmd->indirectBuffer.Get());
+        }
+        ASSERT_EQ(ToAPI(cmd->indirectBuffer.Get()), indirectScratchBuffer);
+        ASSERT_EQ(cmd->indirectOffset, uint64_t(0));
+    };
+
+    // Initialize as null. Once we know the pointer, we'll check
+    // that it's the same pipeline every time.
+    WGPUComputePipeline validationPipeline = nullptr;
+    auto ExpectSetValidationPipeline = [&](CommandIterator* commands) {
+        auto* cmd = commands->NextCommand<SetComputePipelineCmd>();
+        WGPUComputePipeline pipeline = ToAPI(cmd->pipeline.Get());
+        if (validationPipeline != nullptr) {
+            EXPECT_EQ(pipeline, validationPipeline);
+        } else {
+            EXPECT_NE(pipeline, nullptr);
+            validationPipeline = pipeline;
         }
     };
 
-    // Indirect dispatch validation changes the bind groups in the middle
-    // of a pass. Test that bindings are restored after the validation runs.
-    TEST_F(CommandBufferEncodingTests, ComputePassEncoderIndirectDispatchStateRestoration) {
-        wgpu::BindGroupLayout staticLayout =
-            utils::MakeBindGroupLayout(device, {{
-                                                   0,
-                                                   wgpu::ShaderStage::Compute,
-                                                   wgpu::BufferBindingType::Uniform,
-                                               }});
+    auto ExpectSetValidationBindGroup = [&](CommandIterator* commands) {
+        auto* cmd = commands->NextCommand<SetBindGroupCmd>();
+        ASSERT_EQ(cmd->index, BindGroupIndex(0));
+        ASSERT_NE(cmd->group.Get(), nullptr);
+        ASSERT_EQ(cmd->dynamicOffsetCount, 0u);
+    };
 
-        wgpu::BindGroupLayout dynamicLayout =
-            utils::MakeBindGroupLayout(device, {{
-                                                   0,
-                                                   wgpu::ShaderStage::Compute,
-                                                   wgpu::BufferBindingType::Uniform,
-                                                   true,
-                                               }});
+    auto ExpectSetValidationDispatch = [&](CommandIterator* commands) {
+        auto* cmd = commands->NextCommand<DispatchCmd>();
+        ASSERT_EQ(cmd->x, 1u);
+        ASSERT_EQ(cmd->y, 1u);
+        ASSERT_EQ(cmd->z, 1u);
+    };
 
-        // Create a simple pipeline
-        wgpu::ComputePipelineDescriptor csDesc;
-        csDesc.compute.module = utils::CreateShaderModule(device, R"(
+    ExpectCommands(
+        FromAPI(commandBuffer.Get())->GetCommandIteratorForTesting(),
+        {
+            {Command::BeginComputePass,
+             [&](CommandIterator* commands) { SkipCommand(commands, Command::BeginComputePass); }},
+            // Expect the state to be set.
+            {Command::SetComputePipeline, ExpectSetPipeline(pipeline0)},
+            {Command::SetBindGroup, ExpectSetBindGroup(0, staticBG)},
+            {Command::SetBindGroup, ExpectSetBindGroup(1, dynamicBG, {dynamicOffset})},
+
+            // Expect the validation.
+            {Command::SetComputePipeline, ExpectSetValidationPipeline},
+            {Command::SetBindGroup, ExpectSetValidationBindGroup},
+            {Command::Dispatch, ExpectSetValidationDispatch},
+
+            // Expect the state to be restored.
+            {Command::SetComputePipeline, ExpectSetPipeline(pipeline0)},
+            {Command::SetBindGroup, ExpectSetBindGroup(0, staticBG)},
+            {Command::SetBindGroup, ExpectSetBindGroup(1, dynamicBG, {dynamicOffset})},
+
+            // Expect the dispatchIndirect.
+            {Command::DispatchIndirect, ExpectDispatchIndirect},
+
+            // Expect the validation.
+            {Command::SetComputePipeline, ExpectSetValidationPipeline},
+            {Command::SetBindGroup, ExpectSetValidationBindGroup},
+            {Command::Dispatch, ExpectSetValidationDispatch},
+
+            // Expect the state to be restored.
+            {Command::SetComputePipeline, ExpectSetPipeline(pipeline0)},
+            {Command::SetBindGroup, ExpectSetBindGroup(0, staticBG)},
+            {Command::SetBindGroup, ExpectSetBindGroup(1, dynamicBG, {dynamicOffset})},
+
+            // Expect the dispatchIndirect.
+            {Command::DispatchIndirect, ExpectDispatchIndirect},
+
+            // Expect the state to be set (new pipeline).
+            {Command::SetComputePipeline, ExpectSetPipeline(pipeline1)},
+            {Command::SetBindGroup, ExpectSetBindGroup(0, dynamicBG, {dynamicOffset})},
+            {Command::SetBindGroup, ExpectSetBindGroup(1, staticBG)},
+
+            // Expect the validation.
+            {Command::SetComputePipeline, ExpectSetValidationPipeline},
+            {Command::SetBindGroup, ExpectSetValidationBindGroup},
+            {Command::Dispatch, ExpectSetValidationDispatch},
+
+            // Expect the state to be restored.
+            {Command::SetComputePipeline, ExpectSetPipeline(pipeline1)},
+            {Command::SetBindGroup, ExpectSetBindGroup(0, dynamicBG, {dynamicOffset})},
+            {Command::SetBindGroup, ExpectSetBindGroup(1, staticBG)},
+
+            // Expect the dispatchIndirect.
+            {Command::DispatchIndirect, ExpectDispatchIndirect},
+
+            {Command::EndComputePass,
+             [&](CommandIterator* commands) { commands->NextCommand<EndComputePassCmd>(); }},
+        });
+}
+
+// Test that after restoring state, it is fully applied to the state tracker
+// and does not leak state changes that occurred between a snapshot and the
+// state restoration.
+TEST_F(CommandBufferEncodingTests, StateNotLeakedAfterRestore) {
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+    wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
+
+    CommandBufferStateTracker* stateTracker =
+        FromAPI(pass.Get())->GetCommandBufferStateTrackerForTesting();
+
+    // Snapshot the state.
+    CommandBufferStateTracker snapshot = *stateTracker;
+    // Expect no pipeline in the snapshot
+    EXPECT_FALSE(snapshot.HasPipeline());
+
+    // Create a simple pipeline
+    wgpu::ComputePipelineDescriptor csDesc;
+    csDesc.compute.module = utils::CreateShaderModule(device, R"(
         @stage(compute) @workgroup_size(1, 1, 1)
         fn main() {
         })");
-        csDesc.compute.entryPoint = "main";
+    csDesc.compute.entryPoint = "main";
+    wgpu::ComputePipeline pipeline = device.CreateComputePipeline(&csDesc);
 
-        wgpu::PipelineLayout pl0 = utils::MakePipelineLayout(device, {staticLayout, dynamicLayout});
-        csDesc.layout = pl0;
-        wgpu::ComputePipeline pipeline0 = device.CreateComputePipeline(&csDesc);
+    // Set the pipeline.
+    pass.SetPipeline(pipeline);
 
-        wgpu::PipelineLayout pl1 = utils::MakePipelineLayout(device, {dynamicLayout, staticLayout});
-        csDesc.layout = pl1;
-        wgpu::ComputePipeline pipeline1 = device.CreateComputePipeline(&csDesc);
+    // Expect the pipeline to be set.
+    EXPECT_EQ(ToAPI(stateTracker->GetComputePipeline()), pipeline.Get());
 
-        // Create buffers to use for both the indirect buffer and the bind groups.
-        wgpu::Buffer indirectBuffer = utils::CreateBufferFromData<uint32_t>(
-            device, wgpu::BufferUsage::Indirect, {1, 2, 3, 4});
+    // Restore the state.
+    FromAPI(pass.Get())->RestoreCommandBufferStateForTesting(std::move(snapshot));
 
-        wgpu::BufferDescriptor uniformBufferDesc = {};
-        uniformBufferDesc.size = 512;
-        uniformBufferDesc.usage = wgpu::BufferUsage::Uniform;
-        wgpu::Buffer uniformBuffer = device.CreateBuffer(&uniformBufferDesc);
-
-        wgpu::BindGroup staticBG = utils::MakeBindGroup(device, staticLayout, {{0, uniformBuffer}});
-
-        wgpu::BindGroup dynamicBG =
-            utils::MakeBindGroup(device, dynamicLayout, {{0, uniformBuffer, 0, 256}});
-
-        uint32_t dynamicOffset = 256;
-        std::vector<uint32_t> emptyDynamicOffsets = {};
-        std::vector<uint32_t> singleDynamicOffset = {dynamicOffset};
-
-        // Begin encoding commands.
-        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-        wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
-
-        CommandBufferStateTracker* stateTracker =
-            FromAPI(pass.Get())->GetCommandBufferStateTrackerForTesting();
-
-        // Perform a dispatch indirect which will be preceded by a validation dispatch.
-        pass.SetPipeline(pipeline0);
-        pass.SetBindGroup(0, staticBG);
-        pass.SetBindGroup(1, dynamicBG, 1, &dynamicOffset);
-        EXPECT_EQ(ToAPI(stateTracker->GetComputePipeline()), pipeline0.Get());
-
-        pass.DispatchWorkgroupsIndirect(indirectBuffer, 0);
-
-        // Expect restored state.
-        EXPECT_EQ(ToAPI(stateTracker->GetComputePipeline()), pipeline0.Get());
-        EXPECT_EQ(ToAPI(stateTracker->GetPipelineLayout()), pl0.Get());
-        EXPECT_EQ(ToAPI(stateTracker->GetBindGroup(BindGroupIndex(0))), staticBG.Get());
-        EXPECT_EQ(stateTracker->GetDynamicOffsets(BindGroupIndex(0)), emptyDynamicOffsets);
-        EXPECT_EQ(ToAPI(stateTracker->GetBindGroup(BindGroupIndex(1))), dynamicBG.Get());
-        EXPECT_EQ(stateTracker->GetDynamicOffsets(BindGroupIndex(1)), singleDynamicOffset);
-
-        // Dispatch again to check that the restored state can be used.
-        // Also pass an indirect offset which should get replaced with the offset
-        // into the scratch indirect buffer (0).
-        pass.DispatchWorkgroupsIndirect(indirectBuffer, 4);
-
-        // Expect restored state.
-        EXPECT_EQ(ToAPI(stateTracker->GetComputePipeline()), pipeline0.Get());
-        EXPECT_EQ(ToAPI(stateTracker->GetPipelineLayout()), pl0.Get());
-        EXPECT_EQ(ToAPI(stateTracker->GetBindGroup(BindGroupIndex(0))), staticBG.Get());
-        EXPECT_EQ(stateTracker->GetDynamicOffsets(BindGroupIndex(0)), emptyDynamicOffsets);
-        EXPECT_EQ(ToAPI(stateTracker->GetBindGroup(BindGroupIndex(1))), dynamicBG.Get());
-        EXPECT_EQ(stateTracker->GetDynamicOffsets(BindGroupIndex(1)), singleDynamicOffset);
-
-        // Change the pipeline
-        pass.SetPipeline(pipeline1);
-        pass.SetBindGroup(0, dynamicBG, 1, &dynamicOffset);
-        pass.SetBindGroup(1, staticBG);
-        EXPECT_EQ(ToAPI(stateTracker->GetComputePipeline()), pipeline1.Get());
-        EXPECT_EQ(ToAPI(stateTracker->GetPipelineLayout()), pl1.Get());
-
-        pass.DispatchWorkgroupsIndirect(indirectBuffer, 0);
-
-        // Expect restored state.
-        EXPECT_EQ(ToAPI(stateTracker->GetComputePipeline()), pipeline1.Get());
-        EXPECT_EQ(ToAPI(stateTracker->GetPipelineLayout()), pl1.Get());
-        EXPECT_EQ(ToAPI(stateTracker->GetBindGroup(BindGroupIndex(0))), dynamicBG.Get());
-        EXPECT_EQ(stateTracker->GetDynamicOffsets(BindGroupIndex(0)), singleDynamicOffset);
-        EXPECT_EQ(ToAPI(stateTracker->GetBindGroup(BindGroupIndex(1))), staticBG.Get());
-        EXPECT_EQ(stateTracker->GetDynamicOffsets(BindGroupIndex(1)), emptyDynamicOffsets);
-
-        pass.End();
-
-        wgpu::CommandBuffer commandBuffer = encoder.Finish();
-
-        auto ExpectSetPipeline = [](wgpu::ComputePipeline pipeline) {
-            return [pipeline](CommandIterator* commands) {
-                auto* cmd = commands->NextCommand<SetComputePipelineCmd>();
-                EXPECT_EQ(ToAPI(cmd->pipeline.Get()), pipeline.Get());
-            };
-        };
-
-        auto ExpectSetBindGroup = [](uint32_t index, wgpu::BindGroup bg,
-                                     std::vector<uint32_t> offsets = {}) {
-            return [index, bg, offsets](CommandIterator* commands) {
-                auto* cmd = commands->NextCommand<SetBindGroupCmd>();
-                uint32_t* dynamicOffsets = nullptr;
-                if (cmd->dynamicOffsetCount > 0) {
-                    dynamicOffsets = commands->NextData<uint32_t>(cmd->dynamicOffsetCount);
-                }
-
-                ASSERT_EQ(cmd->index, BindGroupIndex(index));
-                ASSERT_EQ(ToAPI(cmd->group.Get()), bg.Get());
-                ASSERT_EQ(cmd->dynamicOffsetCount, offsets.size());
-                for (uint32_t i = 0; i < cmd->dynamicOffsetCount; ++i) {
-                    ASSERT_EQ(dynamicOffsets[i], offsets[i]);
-                }
-            };
-        };
-
-        // Initialize as null. Once we know the pointer, we'll check
-        // that it's the same buffer every time.
-        WGPUBuffer indirectScratchBuffer = nullptr;
-        auto ExpectDispatchIndirect = [&](CommandIterator* commands) {
-            auto* cmd = commands->NextCommand<DispatchIndirectCmd>();
-            if (indirectScratchBuffer == nullptr) {
-                indirectScratchBuffer = ToAPI(cmd->indirectBuffer.Get());
-            }
-            ASSERT_EQ(ToAPI(cmd->indirectBuffer.Get()), indirectScratchBuffer);
-            ASSERT_EQ(cmd->indirectOffset, uint64_t(0));
-        };
-
-        // Initialize as null. Once we know the pointer, we'll check
-        // that it's the same pipeline every time.
-        WGPUComputePipeline validationPipeline = nullptr;
-        auto ExpectSetValidationPipeline = [&](CommandIterator* commands) {
-            auto* cmd = commands->NextCommand<SetComputePipelineCmd>();
-            WGPUComputePipeline pipeline = ToAPI(cmd->pipeline.Get());
-            if (validationPipeline != nullptr) {
-                EXPECT_EQ(pipeline, validationPipeline);
-            } else {
-                EXPECT_NE(pipeline, nullptr);
-                validationPipeline = pipeline;
-            }
-        };
-
-        auto ExpectSetValidationBindGroup = [&](CommandIterator* commands) {
-            auto* cmd = commands->NextCommand<SetBindGroupCmd>();
-            ASSERT_EQ(cmd->index, BindGroupIndex(0));
-            ASSERT_NE(cmd->group.Get(), nullptr);
-            ASSERT_EQ(cmd->dynamicOffsetCount, 0u);
-        };
-
-        auto ExpectSetValidationDispatch = [&](CommandIterator* commands) {
-            auto* cmd = commands->NextCommand<DispatchCmd>();
-            ASSERT_EQ(cmd->x, 1u);
-            ASSERT_EQ(cmd->y, 1u);
-            ASSERT_EQ(cmd->z, 1u);
-        };
-
-        ExpectCommands(
-            FromAPI(commandBuffer.Get())->GetCommandIteratorForTesting(),
-            {
-                {Command::BeginComputePass,
-                 [&](CommandIterator* commands) {
-                     SkipCommand(commands, Command::BeginComputePass);
-                 }},
-                // Expect the state to be set.
-                {Command::SetComputePipeline, ExpectSetPipeline(pipeline0)},
-                {Command::SetBindGroup, ExpectSetBindGroup(0, staticBG)},
-                {Command::SetBindGroup, ExpectSetBindGroup(1, dynamicBG, {dynamicOffset})},
-
-                // Expect the validation.
-                {Command::SetComputePipeline, ExpectSetValidationPipeline},
-                {Command::SetBindGroup, ExpectSetValidationBindGroup},
-                {Command::Dispatch, ExpectSetValidationDispatch},
-
-                // Expect the state to be restored.
-                {Command::SetComputePipeline, ExpectSetPipeline(pipeline0)},
-                {Command::SetBindGroup, ExpectSetBindGroup(0, staticBG)},
-                {Command::SetBindGroup, ExpectSetBindGroup(1, dynamicBG, {dynamicOffset})},
-
-                // Expect the dispatchIndirect.
-                {Command::DispatchIndirect, ExpectDispatchIndirect},
-
-                // Expect the validation.
-                {Command::SetComputePipeline, ExpectSetValidationPipeline},
-                {Command::SetBindGroup, ExpectSetValidationBindGroup},
-                {Command::Dispatch, ExpectSetValidationDispatch},
-
-                // Expect the state to be restored.
-                {Command::SetComputePipeline, ExpectSetPipeline(pipeline0)},
-                {Command::SetBindGroup, ExpectSetBindGroup(0, staticBG)},
-                {Command::SetBindGroup, ExpectSetBindGroup(1, dynamicBG, {dynamicOffset})},
-
-                // Expect the dispatchIndirect.
-                {Command::DispatchIndirect, ExpectDispatchIndirect},
-
-                // Expect the state to be set (new pipeline).
-                {Command::SetComputePipeline, ExpectSetPipeline(pipeline1)},
-                {Command::SetBindGroup, ExpectSetBindGroup(0, dynamicBG, {dynamicOffset})},
-                {Command::SetBindGroup, ExpectSetBindGroup(1, staticBG)},
-
-                // Expect the validation.
-                {Command::SetComputePipeline, ExpectSetValidationPipeline},
-                {Command::SetBindGroup, ExpectSetValidationBindGroup},
-                {Command::Dispatch, ExpectSetValidationDispatch},
-
-                // Expect the state to be restored.
-                {Command::SetComputePipeline, ExpectSetPipeline(pipeline1)},
-                {Command::SetBindGroup, ExpectSetBindGroup(0, dynamicBG, {dynamicOffset})},
-                {Command::SetBindGroup, ExpectSetBindGroup(1, staticBG)},
-
-                // Expect the dispatchIndirect.
-                {Command::DispatchIndirect, ExpectDispatchIndirect},
-
-                {Command::EndComputePass,
-                 [&](CommandIterator* commands) { commands->NextCommand<EndComputePassCmd>(); }},
-            });
-    }
-
-    // Test that after restoring state, it is fully applied to the state tracker
-    // and does not leak state changes that occured between a snapshot and the
-    // state restoration.
-    TEST_F(CommandBufferEncodingTests, StateNotLeakedAfterRestore) {
-        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-        wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
-
-        CommandBufferStateTracker* stateTracker =
-            FromAPI(pass.Get())->GetCommandBufferStateTrackerForTesting();
-
-        // Snapshot the state.
-        CommandBufferStateTracker snapshot = *stateTracker;
-        // Expect no pipeline in the snapshot
-        EXPECT_FALSE(snapshot.HasPipeline());
-
-        // Create a simple pipeline
-        wgpu::ComputePipelineDescriptor csDesc;
-        csDesc.compute.module = utils::CreateShaderModule(device, R"(
-        @stage(compute) @workgroup_size(1, 1, 1)
-        fn main() {
-        })");
-        csDesc.compute.entryPoint = "main";
-        wgpu::ComputePipeline pipeline = device.CreateComputePipeline(&csDesc);
-
-        // Set the pipeline.
-        pass.SetPipeline(pipeline);
-
-        // Expect the pipeline to be set.
-        EXPECT_EQ(ToAPI(stateTracker->GetComputePipeline()), pipeline.Get());
-
-        // Restore the state.
-        FromAPI(pass.Get())->RestoreCommandBufferStateForTesting(std::move(snapshot));
-
-        // Expect no pipeline
-        EXPECT_FALSE(stateTracker->HasPipeline());
-    }
+    // Expect no pipeline
+    EXPECT_FALSE(stateTracker->HasPipeline());
+}
 
 }  // namespace dawn::native
diff --git a/src/dawn/tests/unittests/native/DestroyObjectTests.cpp b/src/dawn/tests/unittests/native/DestroyObjectTests.cpp
index 47481c5..dd57ed5 100644
--- a/src/dawn/tests/unittests/native/DestroyObjectTests.cpp
+++ b/src/dawn/tests/unittests/native/DestroyObjectTests.cpp
@@ -33,795 +33,786 @@
 #include "mocks/SwapChainMock.h"
 #include "mocks/TextureMock.h"
 
-namespace dawn::native { namespace {
+namespace dawn::native {
+namespace {
 
-    using ::testing::_;
-    using ::testing::ByMove;
-    using ::testing::InSequence;
-    using ::testing::Return;
-    using ::testing::Test;
+using ::testing::_;
+using ::testing::ByMove;
+using ::testing::InSequence;
+using ::testing::Return;
+using ::testing::Test;
 
-    class DestroyObjectTests : public Test {
-      public:
-        DestroyObjectTests() : Test() {
-            // Skipping validation on descriptors as coverage for validation is already present.
-            mDevice.SetToggle(Toggle::SkipValidation, true);
-        }
+class DestroyObjectTests : public Test {
+  public:
+    DestroyObjectTests() : Test() {
+        // Skipping validation on descriptors as coverage for validation is already present.
+        mDevice.SetToggle(Toggle::SkipValidation, true);
+    }
 
-        Ref<TextureMock> GetTexture() {
-            if (mTexture != nullptr) {
-                return mTexture;
-            }
-            mTexture =
-                AcquireRef(new TextureMock(&mDevice, TextureBase::TextureState::OwnedInternal));
-            EXPECT_CALL(*mTexture.Get(), DestroyImpl).Times(1);
+    Ref<TextureMock> GetTexture() {
+        if (mTexture != nullptr) {
             return mTexture;
         }
+        mTexture = AcquireRef(new TextureMock(&mDevice, TextureBase::TextureState::OwnedInternal));
+        EXPECT_CALL(*mTexture.Get(), DestroyImpl).Times(1);
+        return mTexture;
+    }
 
-        Ref<PipelineLayoutMock> GetPipelineLayout() {
-            if (mPipelineLayout != nullptr) {
-                return mPipelineLayout;
-            }
-            mPipelineLayout = AcquireRef(new PipelineLayoutMock(&mDevice));
-            EXPECT_CALL(*mPipelineLayout.Get(), DestroyImpl).Times(1);
+    Ref<PipelineLayoutMock> GetPipelineLayout() {
+        if (mPipelineLayout != nullptr) {
             return mPipelineLayout;
         }
+        mPipelineLayout = AcquireRef(new PipelineLayoutMock(&mDevice));
+        EXPECT_CALL(*mPipelineLayout.Get(), DestroyImpl).Times(1);
+        return mPipelineLayout;
+    }
 
-        Ref<ShaderModuleMock> GetVertexShaderModule() {
-            if (mVsModule != nullptr) {
-                return mVsModule;
-            }
-            DAWN_TRY_ASSIGN_WITH_CLEANUP(
-                mVsModule, ShaderModuleMock::Create(&mDevice, R"(
+    Ref<ShaderModuleMock> GetVertexShaderModule() {
+        if (mVsModule != nullptr) {
+            return mVsModule;
+        }
+        DAWN_TRY_ASSIGN_WITH_CLEANUP(
+            mVsModule, ShaderModuleMock::Create(&mDevice, R"(
             @stage(vertex) fn main() -> @builtin(position) vec4<f32> {
                 return vec4<f32>(0.0, 0.0, 0.0, 1.0);
             })"),
-                { ASSERT(false); }, mVsModule);
-            EXPECT_CALL(*mVsModule.Get(), DestroyImpl).Times(1);
-            return mVsModule;
-        }
+            { ASSERT(false); }, mVsModule);
+        EXPECT_CALL(*mVsModule.Get(), DestroyImpl).Times(1);
+        return mVsModule;
+    }
 
-        Ref<ShaderModuleMock> GetComputeShaderModule() {
-            if (mCsModule != nullptr) {
-                return mCsModule;
-            }
-            DAWN_TRY_ASSIGN_WITH_CLEANUP(
-                mCsModule, ShaderModuleMock::Create(&mDevice, R"(
-            @stage(compute) @workgroup_size(1) fn main() {
-            })"),
-                { ASSERT(false); }, mCsModule);
-            EXPECT_CALL(*mCsModule.Get(), DestroyImpl).Times(1);
+    Ref<ShaderModuleMock> GetComputeShaderModule() {
+        if (mCsModule != nullptr) {
             return mCsModule;
         }
-
-      protected:
-        DeviceMock mDevice;
-
-        // The following lazy-initialized objects are used to facilitate creation of dependent
-        // objects under test.
-        Ref<TextureMock> mTexture;
-        Ref<PipelineLayoutMock> mPipelineLayout;
-        Ref<ShaderModuleMock> mVsModule;
-        Ref<ShaderModuleMock> mCsModule;
-    };
-
-    TEST_F(DestroyObjectTests, BindGroupExplicit) {
-        BindGroupMock bindGroupMock(&mDevice);
-        EXPECT_CALL(bindGroupMock, DestroyImpl).Times(1);
-
-        EXPECT_TRUE(bindGroupMock.IsAlive());
-        bindGroupMock.Destroy();
-        EXPECT_FALSE(bindGroupMock.IsAlive());
+        DAWN_TRY_ASSIGN_WITH_CLEANUP(
+            mCsModule, ShaderModuleMock::Create(&mDevice, R"(
+            @stage(compute) @workgroup_size(1) fn main() {
+            })"),
+            { ASSERT(false); }, mCsModule);
+        EXPECT_CALL(*mCsModule.Get(), DestroyImpl).Times(1);
+        return mCsModule;
     }
 
-    // If the reference count on API objects reach 0, they should delete themselves. Note that GTest
-    // will also complain if there is a memory leak.
-    TEST_F(DestroyObjectTests, BindGroupImplicit) {
-        BindGroupMock* bindGroupMock = new BindGroupMock(&mDevice);
-        EXPECT_CALL(*bindGroupMock, DestroyImpl).Times(1);
+  protected:
+    DeviceMock mDevice;
+
+    // The following lazy-initialized objects are used to facilitate creation of dependent
+    // objects under test.
+    Ref<TextureMock> mTexture;
+    Ref<PipelineLayoutMock> mPipelineLayout;
+    Ref<ShaderModuleMock> mVsModule;
+    Ref<ShaderModuleMock> mCsModule;
+};
+
+TEST_F(DestroyObjectTests, BindGroupExplicit) {
+    BindGroupMock bindGroupMock(&mDevice);
+    EXPECT_CALL(bindGroupMock, DestroyImpl).Times(1);
+
+    EXPECT_TRUE(bindGroupMock.IsAlive());
+    bindGroupMock.Destroy();
+    EXPECT_FALSE(bindGroupMock.IsAlive());
+}
+
+// If the reference count on API objects reach 0, they should delete themselves. Note that GTest
+// will also complain if there is a memory leak.
+TEST_F(DestroyObjectTests, BindGroupImplicit) {
+    BindGroupMock* bindGroupMock = new BindGroupMock(&mDevice);
+    EXPECT_CALL(*bindGroupMock, DestroyImpl).Times(1);
+    {
+        BindGroupDescriptor desc = {};
+        Ref<BindGroupBase> bindGroup;
+        EXPECT_CALL(mDevice, CreateBindGroupImpl)
+            .WillOnce(Return(ByMove(AcquireRef(bindGroupMock))));
+        DAWN_ASSERT_AND_ASSIGN(bindGroup, mDevice.CreateBindGroup(&desc));
+
+        EXPECT_TRUE(bindGroup->IsAlive());
+    }
+}
+
+TEST_F(DestroyObjectTests, BindGroupLayoutExplicit) {
+    BindGroupLayoutMock bindGroupLayoutMock(&mDevice);
+    EXPECT_CALL(bindGroupLayoutMock, DestroyImpl).Times(1);
+
+    EXPECT_TRUE(bindGroupLayoutMock.IsAlive());
+    bindGroupLayoutMock.Destroy();
+    EXPECT_FALSE(bindGroupLayoutMock.IsAlive());
+}
+
+// If the reference count on API objects reach 0, they should delete themselves. Note that GTest
+// will also complain if there is a memory leak.
+TEST_F(DestroyObjectTests, BindGroupLayoutImplicit) {
+    BindGroupLayoutMock* bindGroupLayoutMock = new BindGroupLayoutMock(&mDevice);
+    EXPECT_CALL(*bindGroupLayoutMock, DestroyImpl).Times(1);
+    {
+        BindGroupLayoutDescriptor desc = {};
+        Ref<BindGroupLayoutBase> bindGroupLayout;
+        EXPECT_CALL(mDevice, CreateBindGroupLayoutImpl)
+            .WillOnce(Return(ByMove(AcquireRef(bindGroupLayoutMock))));
+        DAWN_ASSERT_AND_ASSIGN(bindGroupLayout, mDevice.CreateBindGroupLayout(&desc));
+
+        EXPECT_TRUE(bindGroupLayout->IsAlive());
+        EXPECT_TRUE(bindGroupLayout->IsCachedReference());
+    }
+}
+
+TEST_F(DestroyObjectTests, BufferExplicit) {
+    {
+        BufferMock bufferMock(&mDevice, BufferBase::BufferState::Unmapped);
+        EXPECT_CALL(bufferMock, DestroyImpl).Times(1);
+
+        EXPECT_TRUE(bufferMock.IsAlive());
+        bufferMock.Destroy();
+        EXPECT_FALSE(bufferMock.IsAlive());
+    }
+    {
+        BufferMock bufferMock(&mDevice, BufferBase::BufferState::Mapped);
         {
-            BindGroupDescriptor desc = {};
-            Ref<BindGroupBase> bindGroup;
-            EXPECT_CALL(mDevice, CreateBindGroupImpl)
-                .WillOnce(Return(ByMove(AcquireRef(bindGroupMock))));
-            DAWN_ASSERT_AND_ASSIGN(bindGroup, mDevice.CreateBindGroup(&desc));
-
-            EXPECT_TRUE(bindGroup->IsAlive());
-        }
-    }
-
-    TEST_F(DestroyObjectTests, BindGroupLayoutExplicit) {
-        BindGroupLayoutMock bindGroupLayoutMock(&mDevice);
-        EXPECT_CALL(bindGroupLayoutMock, DestroyImpl).Times(1);
-
-        EXPECT_TRUE(bindGroupLayoutMock.IsAlive());
-        bindGroupLayoutMock.Destroy();
-        EXPECT_FALSE(bindGroupLayoutMock.IsAlive());
-    }
-
-    // If the reference count on API objects reach 0, they should delete themselves. Note that GTest
-    // will also complain if there is a memory leak.
-    TEST_F(DestroyObjectTests, BindGroupLayoutImplicit) {
-        BindGroupLayoutMock* bindGroupLayoutMock = new BindGroupLayoutMock(&mDevice);
-        EXPECT_CALL(*bindGroupLayoutMock, DestroyImpl).Times(1);
-        {
-            BindGroupLayoutDescriptor desc = {};
-            Ref<BindGroupLayoutBase> bindGroupLayout;
-            EXPECT_CALL(mDevice, CreateBindGroupLayoutImpl)
-                .WillOnce(Return(ByMove(AcquireRef(bindGroupLayoutMock))));
-            DAWN_ASSERT_AND_ASSIGN(bindGroupLayout, mDevice.CreateBindGroupLayout(&desc));
-
-            EXPECT_TRUE(bindGroupLayout->IsAlive());
-            EXPECT_TRUE(bindGroupLayout->IsCachedReference());
-        }
-    }
-
-    TEST_F(DestroyObjectTests, BufferExplicit) {
-        {
-            BufferMock bufferMock(&mDevice, BufferBase::BufferState::Unmapped);
+            InSequence seq;
             EXPECT_CALL(bufferMock, DestroyImpl).Times(1);
-
-            EXPECT_TRUE(bufferMock.IsAlive());
-            bufferMock.Destroy();
-            EXPECT_FALSE(bufferMock.IsAlive());
+            EXPECT_CALL(bufferMock, UnmapImpl).Times(1);
         }
-        {
-            BufferMock bufferMock(&mDevice, BufferBase::BufferState::Mapped);
-            {
-                InSequence seq;
-                EXPECT_CALL(bufferMock, DestroyImpl).Times(1);
-                EXPECT_CALL(bufferMock, UnmapImpl).Times(1);
-            }
 
-            EXPECT_TRUE(bufferMock.IsAlive());
-            bufferMock.Destroy();
-            EXPECT_FALSE(bufferMock.IsAlive());
+        EXPECT_TRUE(bufferMock.IsAlive());
+        bufferMock.Destroy();
+        EXPECT_FALSE(bufferMock.IsAlive());
+    }
+}
+
+// If the reference count on API objects reach 0, they should delete themselves. Note that GTest
+// will also complain if there is a memory leak.
+TEST_F(DestroyObjectTests, BufferImplicit) {
+    {
+        BufferMock* bufferMock = new BufferMock(&mDevice, BufferBase::BufferState::Unmapped);
+        EXPECT_CALL(*bufferMock, DestroyImpl).Times(1);
+        {
+            BufferDescriptor desc = {};
+            Ref<BufferBase> buffer;
+            EXPECT_CALL(mDevice, CreateBufferImpl).WillOnce(Return(ByMove(AcquireRef(bufferMock))));
+            DAWN_ASSERT_AND_ASSIGN(buffer, mDevice.CreateBuffer(&desc));
+
+            EXPECT_TRUE(buffer->IsAlive());
         }
     }
-
-    // If the reference count on API objects reach 0, they should delete themselves. Note that GTest
-    // will also complain if there is a memory leak.
-    TEST_F(DestroyObjectTests, BufferImplicit) {
+    {
+        BufferMock* bufferMock = new BufferMock(&mDevice, BufferBase::BufferState::Mapped);
         {
-            BufferMock* bufferMock = new BufferMock(&mDevice, BufferBase::BufferState::Unmapped);
+            InSequence seq;
             EXPECT_CALL(*bufferMock, DestroyImpl).Times(1);
-            {
-                BufferDescriptor desc = {};
-                Ref<BufferBase> buffer;
-                EXPECT_CALL(mDevice, CreateBufferImpl)
-                    .WillOnce(Return(ByMove(AcquireRef(bufferMock))));
-                DAWN_ASSERT_AND_ASSIGN(buffer, mDevice.CreateBuffer(&desc));
-
-                EXPECT_TRUE(buffer->IsAlive());
-            }
+            EXPECT_CALL(*bufferMock, UnmapImpl).Times(1);
         }
         {
-            BufferMock* bufferMock = new BufferMock(&mDevice, BufferBase::BufferState::Mapped);
-            {
-                InSequence seq;
-                EXPECT_CALL(*bufferMock, DestroyImpl).Times(1);
-                EXPECT_CALL(*bufferMock, UnmapImpl).Times(1);
-            }
-            {
-                BufferDescriptor desc = {};
-                Ref<BufferBase> buffer;
-                EXPECT_CALL(mDevice, CreateBufferImpl)
-                    .WillOnce(Return(ByMove(AcquireRef(bufferMock))));
-                DAWN_ASSERT_AND_ASSIGN(buffer, mDevice.CreateBuffer(&desc));
+            BufferDescriptor desc = {};
+            Ref<BufferBase> buffer;
+            EXPECT_CALL(mDevice, CreateBufferImpl).WillOnce(Return(ByMove(AcquireRef(bufferMock))));
+            DAWN_ASSERT_AND_ASSIGN(buffer, mDevice.CreateBuffer(&desc));
 
-                EXPECT_TRUE(buffer->IsAlive());
-            }
+            EXPECT_TRUE(buffer->IsAlive());
         }
     }
+}
 
-    TEST_F(DestroyObjectTests, CommandBufferExplicit) {
-        CommandBufferMock commandBufferMock(&mDevice);
-        EXPECT_CALL(commandBufferMock, DestroyImpl).Times(1);
+TEST_F(DestroyObjectTests, CommandBufferExplicit) {
+    CommandBufferMock commandBufferMock(&mDevice);
+    EXPECT_CALL(commandBufferMock, DestroyImpl).Times(1);
 
-        EXPECT_TRUE(commandBufferMock.IsAlive());
-        commandBufferMock.Destroy();
-        EXPECT_FALSE(commandBufferMock.IsAlive());
+    EXPECT_TRUE(commandBufferMock.IsAlive());
+    commandBufferMock.Destroy();
+    EXPECT_FALSE(commandBufferMock.IsAlive());
+}
+
+// If the reference count on API objects reach 0, they should delete themselves. Note that GTest
+// will also complain if there is a memory leak.
+TEST_F(DestroyObjectTests, CommandBufferImplicit) {
+    CommandBufferMock* commandBufferMock = new CommandBufferMock(&mDevice);
+    EXPECT_CALL(*commandBufferMock, DestroyImpl).Times(1);
+    {
+        CommandBufferDescriptor desc = {};
+        Ref<CommandBufferBase> commandBuffer;
+        EXPECT_CALL(mDevice, CreateCommandBuffer)
+            .WillOnce(Return(ByMove(AcquireRef(commandBufferMock))));
+        DAWN_ASSERT_AND_ASSIGN(commandBuffer, mDevice.CreateCommandBuffer(nullptr, &desc));
+
+        EXPECT_TRUE(commandBuffer->IsAlive());
     }
+}
 
-    // If the reference count on API objects reach 0, they should delete themselves. Note that GTest
-    // will also complain if there is a memory leak.
-    TEST_F(DestroyObjectTests, CommandBufferImplicit) {
-        CommandBufferMock* commandBufferMock = new CommandBufferMock(&mDevice);
+TEST_F(DestroyObjectTests, ComputePipelineExplicit) {
+    ComputePipelineMock computePipelineMock(&mDevice);
+    EXPECT_CALL(computePipelineMock, DestroyImpl).Times(1);
+
+    EXPECT_TRUE(computePipelineMock.IsAlive());
+    computePipelineMock.Destroy();
+    EXPECT_FALSE(computePipelineMock.IsAlive());
+}
+
+// If the reference count on API objects reach 0, they should delete themselves. Note that GTest
+// will also complain if there is a memory leak.
+TEST_F(DestroyObjectTests, ComputePipelineImplicit) {
+    // ComputePipelines usually set their hash values at construction, but the mock does not, so
+    // we set it here.
+    constexpr size_t hash = 0x12345;
+    ComputePipelineMock* computePipelineMock = new ComputePipelineMock(&mDevice);
+    computePipelineMock->SetContentHash(hash);
+    ON_CALL(*computePipelineMock, ComputeContentHash).WillByDefault(Return(hash));
+
+    // Compute pipelines are initialized during their creation via the device.
+    EXPECT_CALL(*computePipelineMock, Initialize).Times(1);
+    EXPECT_CALL(*computePipelineMock, DestroyImpl).Times(1);
+
+    {
+        ComputePipelineDescriptor desc = {};
+        desc.layout = GetPipelineLayout().Get();
+        desc.compute.module = GetComputeShaderModule().Get();
+
+        Ref<ComputePipelineBase> computePipeline;
+        EXPECT_CALL(mDevice, CreateUninitializedComputePipelineImpl)
+            .WillOnce(Return(ByMove(AcquireRef(computePipelineMock))));
+        DAWN_ASSERT_AND_ASSIGN(computePipeline, mDevice.CreateComputePipeline(&desc));
+
+        EXPECT_TRUE(computePipeline->IsAlive());
+        EXPECT_TRUE(computePipeline->IsCachedReference());
+    }
+}
+
+TEST_F(DestroyObjectTests, ExternalTextureExplicit) {
+    ExternalTextureMock externalTextureMock(&mDevice);
+    EXPECT_CALL(externalTextureMock, DestroyImpl).Times(1);
+
+    EXPECT_TRUE(externalTextureMock.IsAlive());
+    externalTextureMock.Destroy();
+    EXPECT_FALSE(externalTextureMock.IsAlive());
+}
+
+TEST_F(DestroyObjectTests, ExternalTextureImplicit) {
+    ExternalTextureMock* externalTextureMock = new ExternalTextureMock(&mDevice);
+    EXPECT_CALL(*externalTextureMock, DestroyImpl).Times(1);
+    {
+        ExternalTextureDescriptor desc = {};
+        Ref<ExternalTextureBase> externalTexture;
+        EXPECT_CALL(mDevice, CreateExternalTextureImpl)
+            .WillOnce(Return(ByMove(AcquireRef(externalTextureMock))));
+        DAWN_ASSERT_AND_ASSIGN(externalTexture, mDevice.CreateExternalTextureImpl(&desc));
+
+        EXPECT_TRUE(externalTexture->IsAlive());
+    }
+}
+
+TEST_F(DestroyObjectTests, PipelineLayoutExplicit) {
+    PipelineLayoutMock pipelineLayoutMock(&mDevice);
+    EXPECT_CALL(pipelineLayoutMock, DestroyImpl).Times(1);
+
+    EXPECT_TRUE(pipelineLayoutMock.IsAlive());
+    pipelineLayoutMock.Destroy();
+    EXPECT_FALSE(pipelineLayoutMock.IsAlive());
+}
+
+// If the reference count on API objects reach 0, they should delete themselves. Note that GTest
+// will also complain if there is a memory leak.
+TEST_F(DestroyObjectTests, PipelineLayoutImplicit) {
+    PipelineLayoutMock* pipelineLayoutMock = new PipelineLayoutMock(&mDevice);
+    EXPECT_CALL(*pipelineLayoutMock, DestroyImpl).Times(1);
+    {
+        PipelineLayoutDescriptor desc = {};
+        Ref<PipelineLayoutBase> pipelineLayout;
+        EXPECT_CALL(mDevice, CreatePipelineLayoutImpl)
+            .WillOnce(Return(ByMove(AcquireRef(pipelineLayoutMock))));
+        DAWN_ASSERT_AND_ASSIGN(pipelineLayout, mDevice.CreatePipelineLayout(&desc));
+
+        EXPECT_TRUE(pipelineLayout->IsAlive());
+        EXPECT_TRUE(pipelineLayout->IsCachedReference());
+    }
+}
+
+TEST_F(DestroyObjectTests, QuerySetExplicit) {
+    QuerySetMock querySetMock(&mDevice);
+    EXPECT_CALL(querySetMock, DestroyImpl).Times(1);
+
+    EXPECT_TRUE(querySetMock.IsAlive());
+    querySetMock.Destroy();
+    EXPECT_FALSE(querySetMock.IsAlive());
+}
+
+// If the reference count on API objects reach 0, they should delete themselves. Note that GTest
+// will also complain if there is a memory leak.
+TEST_F(DestroyObjectTests, QuerySetImplicit) {
+    QuerySetMock* querySetMock = new QuerySetMock(&mDevice);
+    EXPECT_CALL(*querySetMock, DestroyImpl).Times(1);
+    {
+        QuerySetDescriptor desc = {};
+        Ref<QuerySetBase> querySet;
+        EXPECT_CALL(mDevice, CreateQuerySetImpl).WillOnce(Return(ByMove(AcquireRef(querySetMock))));
+        DAWN_ASSERT_AND_ASSIGN(querySet, mDevice.CreateQuerySet(&desc));
+
+        EXPECT_TRUE(querySet->IsAlive());
+    }
+}
+
+TEST_F(DestroyObjectTests, RenderPipelineExplicit) {
+    RenderPipelineMock renderPipelineMock(&mDevice);
+    EXPECT_CALL(renderPipelineMock, DestroyImpl).Times(1);
+
+    EXPECT_TRUE(renderPipelineMock.IsAlive());
+    renderPipelineMock.Destroy();
+    EXPECT_FALSE(renderPipelineMock.IsAlive());
+}
+
+// If the reference count on API objects reach 0, they should delete themselves. Note that GTest
+// will also complain if there is a memory leak.
+TEST_F(DestroyObjectTests, RenderPipelineImplicit) {
+    // RenderPipelines usually set their hash values at construction, but the mock does not, so
+    // we set it here.
+    constexpr size_t hash = 0x12345;
+    RenderPipelineMock* renderPipelineMock = new RenderPipelineMock(&mDevice);
+    renderPipelineMock->SetContentHash(hash);
+    ON_CALL(*renderPipelineMock, ComputeContentHash).WillByDefault(Return(hash));
+
+    // Render pipelines are initialized during their creation via the device.
+    EXPECT_CALL(*renderPipelineMock, Initialize).Times(1);
+    EXPECT_CALL(*renderPipelineMock, DestroyImpl).Times(1);
+
+    {
+        RenderPipelineDescriptor desc = {};
+        desc.layout = GetPipelineLayout().Get();
+        desc.vertex.module = GetVertexShaderModule().Get();
+
+        Ref<RenderPipelineBase> renderPipeline;
+        EXPECT_CALL(mDevice, CreateUninitializedRenderPipelineImpl)
+            .WillOnce(Return(ByMove(AcquireRef(renderPipelineMock))));
+        DAWN_ASSERT_AND_ASSIGN(renderPipeline, mDevice.CreateRenderPipeline(&desc));
+
+        EXPECT_TRUE(renderPipeline->IsAlive());
+        EXPECT_TRUE(renderPipeline->IsCachedReference());
+    }
+}
+
+TEST_F(DestroyObjectTests, SamplerExplicit) {
+    SamplerMock samplerMock(&mDevice);
+    EXPECT_CALL(samplerMock, DestroyImpl).Times(1);
+
+    EXPECT_TRUE(samplerMock.IsAlive());
+    samplerMock.Destroy();
+    EXPECT_FALSE(samplerMock.IsAlive());
+}
+
+// If the reference count on API objects reach 0, they should delete themselves. Note that GTest
+// will also complain if there is a memory leak.
+TEST_F(DestroyObjectTests, SamplerImplicit) {
+    SamplerMock* samplerMock = new SamplerMock(&mDevice);
+    EXPECT_CALL(*samplerMock, DestroyImpl).Times(1);
+    {
+        SamplerDescriptor desc = {};
+        Ref<SamplerBase> sampler;
+        EXPECT_CALL(mDevice, CreateSamplerImpl).WillOnce(Return(ByMove(AcquireRef(samplerMock))));
+        DAWN_ASSERT_AND_ASSIGN(sampler, mDevice.CreateSampler(&desc));
+
+        EXPECT_TRUE(sampler->IsAlive());
+        EXPECT_TRUE(sampler->IsCachedReference());
+    }
+}
+
+TEST_F(DestroyObjectTests, ShaderModuleExplicit) {
+    ShaderModuleMock shaderModuleMock(&mDevice);
+    EXPECT_CALL(shaderModuleMock, DestroyImpl).Times(1);
+
+    EXPECT_TRUE(shaderModuleMock.IsAlive());
+    shaderModuleMock.Destroy();
+    EXPECT_FALSE(shaderModuleMock.IsAlive());
+}
+
+// If the reference count on API objects reach 0, they should delete themselves. Note that GTest
+// will also complain if there is a memory leak.
+TEST_F(DestroyObjectTests, ShaderModuleImplicit) {
+    ShaderModuleMock* shaderModuleMock = new ShaderModuleMock(&mDevice);
+    EXPECT_CALL(*shaderModuleMock, DestroyImpl).Times(1);
+    {
+        ShaderModuleWGSLDescriptor wgslDesc;
+        wgslDesc.source = R"(
+                @stage(compute) @workgroup_size(1) fn main() {
+                }
+            )";
+        ShaderModuleDescriptor desc = {};
+        desc.nextInChain = &wgslDesc;
+        Ref<ShaderModuleBase> shaderModule;
+        EXPECT_CALL(mDevice, CreateShaderModuleImpl)
+            .WillOnce(Return(ByMove(AcquireRef(shaderModuleMock))));
+        DAWN_ASSERT_AND_ASSIGN(shaderModule, mDevice.CreateShaderModule(&desc));
+
+        EXPECT_TRUE(shaderModule->IsAlive());
+        EXPECT_TRUE(shaderModule->IsCachedReference());
+    }
+}
+
+TEST_F(DestroyObjectTests, SwapChainExplicit) {
+    SwapChainMock swapChainMock(&mDevice);
+    EXPECT_CALL(swapChainMock, DestroyImpl).Times(1);
+
+    EXPECT_TRUE(swapChainMock.IsAlive());
+    swapChainMock.Destroy();
+    EXPECT_FALSE(swapChainMock.IsAlive());
+}
+
+// If the reference count on API objects reach 0, they should delete themselves. Note that GTest
+// will also complain if there is a memory leak.
+TEST_F(DestroyObjectTests, SwapChainImplicit) {
+    SwapChainMock* swapChainMock = new SwapChainMock(&mDevice);
+    EXPECT_CALL(*swapChainMock, DestroyImpl).Times(1);
+    {
+        SwapChainDescriptor desc = {};
+        Ref<SwapChainBase> swapChain;
+        EXPECT_CALL(mDevice, CreateSwapChainImpl(_))
+            .WillOnce(Return(ByMove(AcquireRef(swapChainMock))));
+        DAWN_ASSERT_AND_ASSIGN(swapChain, mDevice.CreateSwapChain(nullptr, &desc));
+
+        EXPECT_TRUE(swapChain->IsAlive());
+    }
+}
+
+TEST_F(DestroyObjectTests, TextureExplicit) {
+    {
+        TextureMock textureMock(&mDevice, TextureBase::TextureState::OwnedInternal);
+        EXPECT_CALL(textureMock, DestroyImpl).Times(1);
+
+        EXPECT_TRUE(textureMock.IsAlive());
+        textureMock.Destroy();
+        EXPECT_FALSE(textureMock.IsAlive());
+    }
+    {
+        TextureMock textureMock(&mDevice, TextureBase::TextureState::OwnedExternal);
+        EXPECT_CALL(textureMock, DestroyImpl).Times(1);
+
+        EXPECT_TRUE(textureMock.IsAlive());
+        textureMock.Destroy();
+        EXPECT_FALSE(textureMock.IsAlive());
+    }
+}
+
+// If the reference count on API objects reach 0, they should delete themselves. Note that GTest
+// will also complain if there is a memory leak.
+TEST_F(DestroyObjectTests, TextureImplicit) {
+    {
+        TextureMock* textureMock =
+            new TextureMock(&mDevice, TextureBase::TextureState::OwnedInternal);
+        EXPECT_CALL(*textureMock, DestroyImpl).Times(1);
+        {
+            TextureDescriptor desc = {};
+            Ref<TextureBase> texture;
+            EXPECT_CALL(mDevice, CreateTextureImpl)
+                .WillOnce(Return(ByMove(AcquireRef(textureMock))));
+            DAWN_ASSERT_AND_ASSIGN(texture, mDevice.CreateTexture(&desc));
+
+            EXPECT_TRUE(texture->IsAlive());
+        }
+    }
+    {
+        TextureMock* textureMock =
+            new TextureMock(&mDevice, TextureBase::TextureState::OwnedExternal);
+        EXPECT_CALL(*textureMock, DestroyImpl).Times(1);
+        {
+            TextureDescriptor desc = {};
+            Ref<TextureBase> texture;
+            EXPECT_CALL(mDevice, CreateTextureImpl)
+                .WillOnce(Return(ByMove(AcquireRef(textureMock))));
+            DAWN_ASSERT_AND_ASSIGN(texture, mDevice.CreateTexture(&desc));
+
+            EXPECT_TRUE(texture->IsAlive());
+        }
+    }
+}
+
+TEST_F(DestroyObjectTests, TextureViewExplicit) {
+    TextureViewMock textureViewMock(GetTexture().Get());
+    EXPECT_CALL(textureViewMock, DestroyImpl).Times(1);
+
+    EXPECT_TRUE(textureViewMock.IsAlive());
+    textureViewMock.Destroy();
+    EXPECT_FALSE(textureViewMock.IsAlive());
+}
+
+// If the reference count on API objects reach 0, they should delete themselves. Note that GTest
+// will also complain if there is a memory leak.
+TEST_F(DestroyObjectTests, TextureViewImplicit) {
+    TextureViewMock* textureViewMock = new TextureViewMock(GetTexture().Get());
+    EXPECT_CALL(*textureViewMock, DestroyImpl).Times(1);
+    {
+        TextureViewDescriptor desc = {};
+        Ref<TextureViewBase> textureView;
+        EXPECT_CALL(mDevice, CreateTextureViewImpl)
+            .WillOnce(Return(ByMove(AcquireRef(textureViewMock))));
+        DAWN_ASSERT_AND_ASSIGN(textureView, mDevice.CreateTextureView(GetTexture().Get(), &desc));
+
+        EXPECT_TRUE(textureView->IsAlive());
+    }
+}
+
+// Destroying the objects on the mDevice should result in all created objects being destroyed in
+// order.
+TEST_F(DestroyObjectTests, DestroyObjects) {
+    BindGroupMock* bindGroupMock = new BindGroupMock(&mDevice);
+    BindGroupLayoutMock* bindGroupLayoutMock = new BindGroupLayoutMock(&mDevice);
+    BufferMock* bufferMock = new BufferMock(&mDevice, BufferBase::BufferState::Unmapped);
+    CommandBufferMock* commandBufferMock = new CommandBufferMock(&mDevice);
+    ComputePipelineMock* computePipelineMock = new ComputePipelineMock(&mDevice);
+    ExternalTextureMock* externalTextureMock = new ExternalTextureMock(&mDevice);
+    PipelineLayoutMock* pipelineLayoutMock = new PipelineLayoutMock(&mDevice);
+    QuerySetMock* querySetMock = new QuerySetMock(&mDevice);
+    RenderPipelineMock* renderPipelineMock = new RenderPipelineMock(&mDevice);
+    SamplerMock* samplerMock = new SamplerMock(&mDevice);
+    ShaderModuleMock* shaderModuleMock = new ShaderModuleMock(&mDevice);
+    SwapChainMock* swapChainMock = new SwapChainMock(&mDevice);
+    TextureMock* textureMock = new TextureMock(&mDevice, TextureBase::TextureState::OwnedInternal);
+    TextureViewMock* textureViewMock = new TextureViewMock(GetTexture().Get());
+    {
+        InSequence seq;
         EXPECT_CALL(*commandBufferMock, DestroyImpl).Times(1);
-        {
-            CommandBufferDescriptor desc = {};
-            Ref<CommandBufferBase> commandBuffer;
-            EXPECT_CALL(mDevice, CreateCommandBuffer)
-                .WillOnce(Return(ByMove(AcquireRef(commandBufferMock))));
-            DAWN_ASSERT_AND_ASSIGN(commandBuffer, mDevice.CreateCommandBuffer(nullptr, &desc));
-
-            EXPECT_TRUE(commandBuffer->IsAlive());
-        }
+        EXPECT_CALL(*renderPipelineMock, DestroyImpl).Times(1);
+        EXPECT_CALL(*computePipelineMock, DestroyImpl).Times(1);
+        EXPECT_CALL(*pipelineLayoutMock, DestroyImpl).Times(1);
+        EXPECT_CALL(*swapChainMock, DestroyImpl).Times(1);
+        EXPECT_CALL(*bindGroupMock, DestroyImpl).Times(1);
+        EXPECT_CALL(*bindGroupLayoutMock, DestroyImpl).Times(1);
+        EXPECT_CALL(*shaderModuleMock, DestroyImpl).Times(1);
+        EXPECT_CALL(*externalTextureMock, DestroyImpl).Times(1);
+        EXPECT_CALL(*textureViewMock, DestroyImpl).Times(1);
+        EXPECT_CALL(*textureMock, DestroyImpl).Times(1);
+        EXPECT_CALL(*querySetMock, DestroyImpl).Times(1);
+        EXPECT_CALL(*samplerMock, DestroyImpl).Times(1);
+        EXPECT_CALL(*bufferMock, DestroyImpl).Times(1);
     }
 
-    TEST_F(DestroyObjectTests, ComputePipelineExplicit) {
-        ComputePipelineMock computePipelineMock(&mDevice);
-        EXPECT_CALL(computePipelineMock, DestroyImpl).Times(1);
-
-        EXPECT_TRUE(computePipelineMock.IsAlive());
-        computePipelineMock.Destroy();
-        EXPECT_FALSE(computePipelineMock.IsAlive());
+    Ref<BindGroupBase> bindGroup;
+    {
+        BindGroupDescriptor desc = {};
+        EXPECT_CALL(mDevice, CreateBindGroupImpl)
+            .WillOnce(Return(ByMove(AcquireRef(bindGroupMock))));
+        DAWN_ASSERT_AND_ASSIGN(bindGroup, mDevice.CreateBindGroup(&desc));
+        EXPECT_TRUE(bindGroup->IsAlive());
     }
 
-    // If the reference count on API objects reach 0, they should delete themselves. Note that GTest
-    // will also complain if there is a memory leak.
-    TEST_F(DestroyObjectTests, ComputePipelineImplicit) {
-        // ComputePipelines usually set their hash values at construction, but the mock does not, so
-        // we set it here.
+    Ref<BindGroupLayoutBase> bindGroupLayout;
+    {
+        BindGroupLayoutDescriptor desc = {};
+        EXPECT_CALL(mDevice, CreateBindGroupLayoutImpl)
+            .WillOnce(Return(ByMove(AcquireRef(bindGroupLayoutMock))));
+        DAWN_ASSERT_AND_ASSIGN(bindGroupLayout, mDevice.CreateBindGroupLayout(&desc));
+        EXPECT_TRUE(bindGroupLayout->IsAlive());
+        EXPECT_TRUE(bindGroupLayout->IsCachedReference());
+    }
+
+    Ref<BufferBase> buffer;
+    {
+        BufferDescriptor desc = {};
+        EXPECT_CALL(mDevice, CreateBufferImpl).WillOnce(Return(ByMove(AcquireRef(bufferMock))));
+        DAWN_ASSERT_AND_ASSIGN(buffer, mDevice.CreateBuffer(&desc));
+        EXPECT_TRUE(buffer->IsAlive());
+    }
+
+    Ref<CommandBufferBase> commandBuffer;
+    {
+        CommandBufferDescriptor desc = {};
+        EXPECT_CALL(mDevice, CreateCommandBuffer)
+            .WillOnce(Return(ByMove(AcquireRef(commandBufferMock))));
+        DAWN_ASSERT_AND_ASSIGN(commandBuffer, mDevice.CreateCommandBuffer(nullptr, &desc));
+        EXPECT_TRUE(commandBuffer->IsAlive());
+    }
+
+    Ref<ComputePipelineBase> computePipeline;
+    {
+        // Compute pipelines usually set their hash values at construction, but the mock does
+        // not, so we set it here.
         constexpr size_t hash = 0x12345;
-        ComputePipelineMock* computePipelineMock = new ComputePipelineMock(&mDevice);
         computePipelineMock->SetContentHash(hash);
         ON_CALL(*computePipelineMock, ComputeContentHash).WillByDefault(Return(hash));
 
         // Compute pipelines are initialized during their creation via the device.
         EXPECT_CALL(*computePipelineMock, Initialize).Times(1);
-        EXPECT_CALL(*computePipelineMock, DestroyImpl).Times(1);
 
-        {
-            ComputePipelineDescriptor desc = {};
-            desc.layout = GetPipelineLayout().Get();
-            desc.compute.module = GetComputeShaderModule().Get();
-
-            Ref<ComputePipelineBase> computePipeline;
-            EXPECT_CALL(mDevice, CreateUninitializedComputePipelineImpl)
-                .WillOnce(Return(ByMove(AcquireRef(computePipelineMock))));
-            DAWN_ASSERT_AND_ASSIGN(computePipeline, mDevice.CreateComputePipeline(&desc));
-
-            EXPECT_TRUE(computePipeline->IsAlive());
-            EXPECT_TRUE(computePipeline->IsCachedReference());
-        }
+        ComputePipelineDescriptor desc = {};
+        desc.layout = GetPipelineLayout().Get();
+        desc.compute.module = GetComputeShaderModule().Get();
+        EXPECT_CALL(mDevice, CreateUninitializedComputePipelineImpl)
+            .WillOnce(Return(ByMove(AcquireRef(computePipelineMock))));
+        DAWN_ASSERT_AND_ASSIGN(computePipeline, mDevice.CreateComputePipeline(&desc));
+        EXPECT_TRUE(computePipeline->IsAlive());
+        EXPECT_TRUE(computePipeline->IsCachedReference());
     }
 
-    TEST_F(DestroyObjectTests, ExternalTextureExplicit) {
-        ExternalTextureMock externalTextureMock(&mDevice);
-        EXPECT_CALL(externalTextureMock, DestroyImpl).Times(1);
-
-        EXPECT_TRUE(externalTextureMock.IsAlive());
-        externalTextureMock.Destroy();
-        EXPECT_FALSE(externalTextureMock.IsAlive());
+    Ref<ExternalTextureBase> externalTexture;
+    {
+        ExternalTextureDescriptor desc = {};
+        EXPECT_CALL(mDevice, CreateExternalTextureImpl)
+            .WillOnce(Return(ByMove(AcquireRef(externalTextureMock))));
+        DAWN_ASSERT_AND_ASSIGN(externalTexture, mDevice.CreateExternalTextureImpl(&desc));
+        EXPECT_TRUE(externalTexture->IsAlive());
     }
 
-    TEST_F(DestroyObjectTests, ExternalTextureImplicit) {
-        ExternalTextureMock* externalTextureMock = new ExternalTextureMock(&mDevice);
-        EXPECT_CALL(*externalTextureMock, DestroyImpl).Times(1);
-        {
-            ExternalTextureDescriptor desc = {};
-            Ref<ExternalTextureBase> externalTexture;
-            EXPECT_CALL(mDevice, CreateExternalTextureImpl)
-                .WillOnce(Return(ByMove(AcquireRef(externalTextureMock))));
-            DAWN_ASSERT_AND_ASSIGN(externalTexture, mDevice.CreateExternalTextureImpl(&desc));
-
-            EXPECT_TRUE(externalTexture->IsAlive());
-        }
+    Ref<PipelineLayoutBase> pipelineLayout;
+    {
+        PipelineLayoutDescriptor desc = {};
+        EXPECT_CALL(mDevice, CreatePipelineLayoutImpl)
+            .WillOnce(Return(ByMove(AcquireRef(pipelineLayoutMock))));
+        DAWN_ASSERT_AND_ASSIGN(pipelineLayout, mDevice.CreatePipelineLayout(&desc));
+        EXPECT_TRUE(pipelineLayout->IsAlive());
+        EXPECT_TRUE(pipelineLayout->IsCachedReference());
     }
 
-    TEST_F(DestroyObjectTests, PipelineLayoutExplicit) {
-        PipelineLayoutMock pipelineLayoutMock(&mDevice);
-        EXPECT_CALL(pipelineLayoutMock, DestroyImpl).Times(1);
-
-        EXPECT_TRUE(pipelineLayoutMock.IsAlive());
-        pipelineLayoutMock.Destroy();
-        EXPECT_FALSE(pipelineLayoutMock.IsAlive());
+    Ref<QuerySetBase> querySet;
+    {
+        QuerySetDescriptor desc = {};
+        EXPECT_CALL(mDevice, CreateQuerySetImpl).WillOnce(Return(ByMove(AcquireRef(querySetMock))));
+        DAWN_ASSERT_AND_ASSIGN(querySet, mDevice.CreateQuerySet(&desc));
+        EXPECT_TRUE(querySet->IsAlive());
     }
 
-    // If the reference count on API objects reach 0, they should delete themselves. Note that GTest
-    // will also complain if there is a memory leak.
-    TEST_F(DestroyObjectTests, PipelineLayoutImplicit) {
-        PipelineLayoutMock* pipelineLayoutMock = new PipelineLayoutMock(&mDevice);
-        EXPECT_CALL(*pipelineLayoutMock, DestroyImpl).Times(1);
-        {
-            PipelineLayoutDescriptor desc = {};
-            Ref<PipelineLayoutBase> pipelineLayout;
-            EXPECT_CALL(mDevice, CreatePipelineLayoutImpl)
-                .WillOnce(Return(ByMove(AcquireRef(pipelineLayoutMock))));
-            DAWN_ASSERT_AND_ASSIGN(pipelineLayout, mDevice.CreatePipelineLayout(&desc));
-
-            EXPECT_TRUE(pipelineLayout->IsAlive());
-            EXPECT_TRUE(pipelineLayout->IsCachedReference());
-        }
-    }
-
-    TEST_F(DestroyObjectTests, QuerySetExplicit) {
-        QuerySetMock querySetMock(&mDevice);
-        EXPECT_CALL(querySetMock, DestroyImpl).Times(1);
-
-        EXPECT_TRUE(querySetMock.IsAlive());
-        querySetMock.Destroy();
-        EXPECT_FALSE(querySetMock.IsAlive());
-    }
-
-    // If the reference count on API objects reach 0, they should delete themselves. Note that GTest
-    // will also complain if there is a memory leak.
-    TEST_F(DestroyObjectTests, QuerySetImplicit) {
-        QuerySetMock* querySetMock = new QuerySetMock(&mDevice);
-        EXPECT_CALL(*querySetMock, DestroyImpl).Times(1);
-        {
-            QuerySetDescriptor desc = {};
-            Ref<QuerySetBase> querySet;
-            EXPECT_CALL(mDevice, CreateQuerySetImpl)
-                .WillOnce(Return(ByMove(AcquireRef(querySetMock))));
-            DAWN_ASSERT_AND_ASSIGN(querySet, mDevice.CreateQuerySet(&desc));
-
-            EXPECT_TRUE(querySet->IsAlive());
-        }
-    }
-
-    TEST_F(DestroyObjectTests, RenderPipelineExplicit) {
-        RenderPipelineMock renderPipelineMock(&mDevice);
-        EXPECT_CALL(renderPipelineMock, DestroyImpl).Times(1);
-
-        EXPECT_TRUE(renderPipelineMock.IsAlive());
-        renderPipelineMock.Destroy();
-        EXPECT_FALSE(renderPipelineMock.IsAlive());
-    }
-
-    // If the reference count on API objects reach 0, they should delete themselves. Note that GTest
-    // will also complain if there is a memory leak.
-    TEST_F(DestroyObjectTests, RenderPipelineImplicit) {
-        // RenderPipelines usually set their hash values at construction, but the mock does not, so
-        // we set it here.
+    Ref<RenderPipelineBase> renderPipeline;
+    {
+        // Render pipelines usually set their hash values at construction, but the mock does
+        // not, so we set it here.
         constexpr size_t hash = 0x12345;
-        RenderPipelineMock* renderPipelineMock = new RenderPipelineMock(&mDevice);
         renderPipelineMock->SetContentHash(hash);
         ON_CALL(*renderPipelineMock, ComputeContentHash).WillByDefault(Return(hash));
 
         // Render pipelines are initialized during their creation via the device.
         EXPECT_CALL(*renderPipelineMock, Initialize).Times(1);
-        EXPECT_CALL(*renderPipelineMock, DestroyImpl).Times(1);
 
-        {
-            RenderPipelineDescriptor desc = {};
-            desc.layout = GetPipelineLayout().Get();
-            desc.vertex.module = GetVertexShaderModule().Get();
-
-            Ref<RenderPipelineBase> renderPipeline;
-            EXPECT_CALL(mDevice, CreateUninitializedRenderPipelineImpl)
-                .WillOnce(Return(ByMove(AcquireRef(renderPipelineMock))));
-            DAWN_ASSERT_AND_ASSIGN(renderPipeline, mDevice.CreateRenderPipeline(&desc));
-
-            EXPECT_TRUE(renderPipeline->IsAlive());
-            EXPECT_TRUE(renderPipeline->IsCachedReference());
-        }
+        RenderPipelineDescriptor desc = {};
+        desc.layout = GetPipelineLayout().Get();
+        desc.vertex.module = GetVertexShaderModule().Get();
+        EXPECT_CALL(mDevice, CreateUninitializedRenderPipelineImpl)
+            .WillOnce(Return(ByMove(AcquireRef(renderPipelineMock))));
+        DAWN_ASSERT_AND_ASSIGN(renderPipeline, mDevice.CreateRenderPipeline(&desc));
+        EXPECT_TRUE(renderPipeline->IsAlive());
+        EXPECT_TRUE(renderPipeline->IsCachedReference());
     }
 
-    TEST_F(DestroyObjectTests, SamplerExplicit) {
-        SamplerMock samplerMock(&mDevice);
-        EXPECT_CALL(samplerMock, DestroyImpl).Times(1);
-
-        EXPECT_TRUE(samplerMock.IsAlive());
-        samplerMock.Destroy();
-        EXPECT_FALSE(samplerMock.IsAlive());
+    Ref<SamplerBase> sampler;
+    {
+        SamplerDescriptor desc = {};
+        EXPECT_CALL(mDevice, CreateSamplerImpl).WillOnce(Return(ByMove(AcquireRef(samplerMock))));
+        DAWN_ASSERT_AND_ASSIGN(sampler, mDevice.CreateSampler(&desc));
+        EXPECT_TRUE(sampler->IsAlive());
+        EXPECT_TRUE(sampler->IsCachedReference());
     }
 
-    // If the reference count on API objects reach 0, they should delete themselves. Note that GTest
-    // will also complain if there is a memory leak.
-    TEST_F(DestroyObjectTests, SamplerImplicit) {
-        SamplerMock* samplerMock = new SamplerMock(&mDevice);
-        EXPECT_CALL(*samplerMock, DestroyImpl).Times(1);
-        {
-            SamplerDescriptor desc = {};
-            Ref<SamplerBase> sampler;
-            EXPECT_CALL(mDevice, CreateSamplerImpl)
-                .WillOnce(Return(ByMove(AcquireRef(samplerMock))));
-            DAWN_ASSERT_AND_ASSIGN(sampler, mDevice.CreateSampler(&desc));
-
-            EXPECT_TRUE(sampler->IsAlive());
-            EXPECT_TRUE(sampler->IsCachedReference());
-        }
-    }
-
-    TEST_F(DestroyObjectTests, ShaderModuleExplicit) {
-        ShaderModuleMock shaderModuleMock(&mDevice);
-        EXPECT_CALL(shaderModuleMock, DestroyImpl).Times(1);
-
-        EXPECT_TRUE(shaderModuleMock.IsAlive());
-        shaderModuleMock.Destroy();
-        EXPECT_FALSE(shaderModuleMock.IsAlive());
-    }
-
-    // If the reference count on API objects reach 0, they should delete themselves. Note that GTest
-    // will also complain if there is a memory leak.
-    TEST_F(DestroyObjectTests, ShaderModuleImplicit) {
-        ShaderModuleMock* shaderModuleMock = new ShaderModuleMock(&mDevice);
-        EXPECT_CALL(*shaderModuleMock, DestroyImpl).Times(1);
-        {
-            ShaderModuleWGSLDescriptor wgslDesc;
-            wgslDesc.source = R"(
+    Ref<ShaderModuleBase> shaderModule;
+    {
+        ShaderModuleWGSLDescriptor wgslDesc;
+        wgslDesc.source = R"(
                 @stage(compute) @workgroup_size(1) fn main() {
                 }
             )";
-            ShaderModuleDescriptor desc = {};
-            desc.nextInChain = &wgslDesc;
-            Ref<ShaderModuleBase> shaderModule;
-            EXPECT_CALL(mDevice, CreateShaderModuleImpl)
-                .WillOnce(Return(ByMove(AcquireRef(shaderModuleMock))));
-            DAWN_ASSERT_AND_ASSIGN(shaderModule, mDevice.CreateShaderModule(&desc));
+        ShaderModuleDescriptor desc = {};
+        desc.nextInChain = &wgslDesc;
 
-            EXPECT_TRUE(shaderModule->IsAlive());
-            EXPECT_TRUE(shaderModule->IsCachedReference());
-        }
+        EXPECT_CALL(mDevice, CreateShaderModuleImpl)
+            .WillOnce(Return(ByMove(AcquireRef(shaderModuleMock))));
+        DAWN_ASSERT_AND_ASSIGN(shaderModule, mDevice.CreateShaderModule(&desc));
+        EXPECT_TRUE(shaderModule->IsAlive());
+        EXPECT_TRUE(shaderModule->IsCachedReference());
     }
 
-    TEST_F(DestroyObjectTests, SwapChainExplicit) {
-        SwapChainMock swapChainMock(&mDevice);
-        EXPECT_CALL(swapChainMock, DestroyImpl).Times(1);
-
-        EXPECT_TRUE(swapChainMock.IsAlive());
-        swapChainMock.Destroy();
-        EXPECT_FALSE(swapChainMock.IsAlive());
+    Ref<SwapChainBase> swapChain;
+    {
+        SwapChainDescriptor desc = {};
+        EXPECT_CALL(mDevice, CreateSwapChainImpl(_))
+            .WillOnce(Return(ByMove(AcquireRef(swapChainMock))));
+        DAWN_ASSERT_AND_ASSIGN(swapChain, mDevice.CreateSwapChain(nullptr, &desc));
+        EXPECT_TRUE(swapChain->IsAlive());
     }
 
-    // If the reference count on API objects reach 0, they should delete themselves. Note that GTest
-    // will also complain if there is a memory leak.
-    TEST_F(DestroyObjectTests, SwapChainImplicit) {
-        SwapChainMock* swapChainMock = new SwapChainMock(&mDevice);
-        EXPECT_CALL(*swapChainMock, DestroyImpl).Times(1);
-        {
-            SwapChainDescriptor desc = {};
-            Ref<SwapChainBase> swapChain;
-            EXPECT_CALL(mDevice, CreateSwapChainImpl(_))
-                .WillOnce(Return(ByMove(AcquireRef(swapChainMock))));
-            DAWN_ASSERT_AND_ASSIGN(swapChain, mDevice.CreateSwapChain(nullptr, &desc));
-
-            EXPECT_TRUE(swapChain->IsAlive());
-        }
+    Ref<TextureBase> texture;
+    {
+        TextureDescriptor desc = {};
+        EXPECT_CALL(mDevice, CreateTextureImpl).WillOnce(Return(ByMove(AcquireRef(textureMock))));
+        DAWN_ASSERT_AND_ASSIGN(texture, mDevice.CreateTexture(&desc));
+        EXPECT_TRUE(texture->IsAlive());
     }
 
-    TEST_F(DestroyObjectTests, TextureExplicit) {
-        {
-            TextureMock textureMock(&mDevice, TextureBase::TextureState::OwnedInternal);
-            EXPECT_CALL(textureMock, DestroyImpl).Times(1);
-
-            EXPECT_TRUE(textureMock.IsAlive());
-            textureMock.Destroy();
-            EXPECT_FALSE(textureMock.IsAlive());
-        }
-        {
-            TextureMock textureMock(&mDevice, TextureBase::TextureState::OwnedExternal);
-            EXPECT_CALL(textureMock, DestroyImpl).Times(1);
-
-            EXPECT_TRUE(textureMock.IsAlive());
-            textureMock.Destroy();
-            EXPECT_FALSE(textureMock.IsAlive());
-        }
+    Ref<TextureViewBase> textureView;
+    {
+        TextureViewDescriptor desc = {};
+        EXPECT_CALL(mDevice, CreateTextureViewImpl)
+            .WillOnce(Return(ByMove(AcquireRef(textureViewMock))));
+        DAWN_ASSERT_AND_ASSIGN(textureView, mDevice.CreateTextureView(GetTexture().Get(), &desc));
+        EXPECT_TRUE(textureView->IsAlive());
     }
 
-    // If the reference count on API objects reach 0, they should delete themselves. Note that GTest
-    // will also complain if there is a memory leak.
-    TEST_F(DestroyObjectTests, TextureImplicit) {
-        {
-            TextureMock* textureMock =
-                new TextureMock(&mDevice, TextureBase::TextureState::OwnedInternal);
-            EXPECT_CALL(*textureMock, DestroyImpl).Times(1);
-            {
-                TextureDescriptor desc = {};
-                Ref<TextureBase> texture;
-                EXPECT_CALL(mDevice, CreateTextureImpl)
-                    .WillOnce(Return(ByMove(AcquireRef(textureMock))));
-                DAWN_ASSERT_AND_ASSIGN(texture, mDevice.CreateTexture(&desc));
+    mDevice.DestroyObjects();
+    EXPECT_FALSE(bindGroup->IsAlive());
+    EXPECT_FALSE(bindGroupLayout->IsAlive());
+    EXPECT_FALSE(buffer->IsAlive());
+    EXPECT_FALSE(commandBuffer->IsAlive());
+    EXPECT_FALSE(computePipeline->IsAlive());
+    EXPECT_FALSE(externalTexture->IsAlive());
+    EXPECT_FALSE(pipelineLayout->IsAlive());
+    EXPECT_FALSE(querySet->IsAlive());
+    EXPECT_FALSE(renderPipeline->IsAlive());
+    EXPECT_FALSE(sampler->IsAlive());
+    EXPECT_FALSE(shaderModule->IsAlive());
+    EXPECT_FALSE(swapChain->IsAlive());
+    EXPECT_FALSE(texture->IsAlive());
+    EXPECT_FALSE(textureView->IsAlive());
+}
 
-                EXPECT_TRUE(texture->IsAlive());
-            }
-        }
-        {
-            TextureMock* textureMock =
-                new TextureMock(&mDevice, TextureBase::TextureState::OwnedExternal);
-            EXPECT_CALL(*textureMock, DestroyImpl).Times(1);
-            {
-                TextureDescriptor desc = {};
-                Ref<TextureBase> texture;
-                EXPECT_CALL(mDevice, CreateTextureImpl)
-                    .WillOnce(Return(ByMove(AcquireRef(textureMock))));
-                DAWN_ASSERT_AND_ASSIGN(texture, mDevice.CreateTexture(&desc));
-
-                EXPECT_TRUE(texture->IsAlive());
-            }
-        }
-    }
-
-    TEST_F(DestroyObjectTests, TextureViewExplicit) {
-        TextureViewMock textureViewMock(GetTexture().Get());
-        EXPECT_CALL(textureViewMock, DestroyImpl).Times(1);
-
-        EXPECT_TRUE(textureViewMock.IsAlive());
-        textureViewMock.Destroy();
-        EXPECT_FALSE(textureViewMock.IsAlive());
-    }
-
-    // If the reference count on API objects reach 0, they should delete themselves. Note that GTest
-    // will also complain if there is a memory leak.
-    TEST_F(DestroyObjectTests, TextureViewImplicit) {
-        TextureViewMock* textureViewMock = new TextureViewMock(GetTexture().Get());
-        EXPECT_CALL(*textureViewMock, DestroyImpl).Times(1);
-        {
-            TextureViewDescriptor desc = {};
-            Ref<TextureViewBase> textureView;
-            EXPECT_CALL(mDevice, CreateTextureViewImpl)
-                .WillOnce(Return(ByMove(AcquireRef(textureViewMock))));
-            DAWN_ASSERT_AND_ASSIGN(textureView,
-                                   mDevice.CreateTextureView(GetTexture().Get(), &desc));
-
-            EXPECT_TRUE(textureView->IsAlive());
-        }
-    }
-
-    // Destroying the objects on the mDevice should result in all created objects being destroyed in
-    // order.
-    TEST_F(DestroyObjectTests, DestroyObjects) {
-        BindGroupMock* bindGroupMock = new BindGroupMock(&mDevice);
-        BindGroupLayoutMock* bindGroupLayoutMock = new BindGroupLayoutMock(&mDevice);
-        BufferMock* bufferMock = new BufferMock(&mDevice, BufferBase::BufferState::Unmapped);
-        CommandBufferMock* commandBufferMock = new CommandBufferMock(&mDevice);
-        ComputePipelineMock* computePipelineMock = new ComputePipelineMock(&mDevice);
-        ExternalTextureMock* externalTextureMock = new ExternalTextureMock(&mDevice);
-        PipelineLayoutMock* pipelineLayoutMock = new PipelineLayoutMock(&mDevice);
-        QuerySetMock* querySetMock = new QuerySetMock(&mDevice);
-        RenderPipelineMock* renderPipelineMock = new RenderPipelineMock(&mDevice);
-        SamplerMock* samplerMock = new SamplerMock(&mDevice);
-        ShaderModuleMock* shaderModuleMock = new ShaderModuleMock(&mDevice);
-        SwapChainMock* swapChainMock = new SwapChainMock(&mDevice);
-        TextureMock* textureMock =
-            new TextureMock(&mDevice, TextureBase::TextureState::OwnedInternal);
-        TextureViewMock* textureViewMock = new TextureViewMock(GetTexture().Get());
-        {
-            InSequence seq;
-            EXPECT_CALL(*commandBufferMock, DestroyImpl).Times(1);
-            EXPECT_CALL(*renderPipelineMock, DestroyImpl).Times(1);
-            EXPECT_CALL(*computePipelineMock, DestroyImpl).Times(1);
-            EXPECT_CALL(*pipelineLayoutMock, DestroyImpl).Times(1);
-            EXPECT_CALL(*swapChainMock, DestroyImpl).Times(1);
-            EXPECT_CALL(*bindGroupMock, DestroyImpl).Times(1);
-            EXPECT_CALL(*bindGroupLayoutMock, DestroyImpl).Times(1);
-            EXPECT_CALL(*shaderModuleMock, DestroyImpl).Times(1);
-            EXPECT_CALL(*externalTextureMock, DestroyImpl).Times(1);
-            EXPECT_CALL(*textureViewMock, DestroyImpl).Times(1);
-            EXPECT_CALL(*textureMock, DestroyImpl).Times(1);
-            EXPECT_CALL(*querySetMock, DestroyImpl).Times(1);
-            EXPECT_CALL(*samplerMock, DestroyImpl).Times(1);
-            EXPECT_CALL(*bufferMock, DestroyImpl).Times(1);
-        }
-
-        Ref<BindGroupBase> bindGroup;
-        {
-            BindGroupDescriptor desc = {};
-            EXPECT_CALL(mDevice, CreateBindGroupImpl)
-                .WillOnce(Return(ByMove(AcquireRef(bindGroupMock))));
-            DAWN_ASSERT_AND_ASSIGN(bindGroup, mDevice.CreateBindGroup(&desc));
-            EXPECT_TRUE(bindGroup->IsAlive());
-        }
-
-        Ref<BindGroupLayoutBase> bindGroupLayout;
-        {
-            BindGroupLayoutDescriptor desc = {};
-            EXPECT_CALL(mDevice, CreateBindGroupLayoutImpl)
-                .WillOnce(Return(ByMove(AcquireRef(bindGroupLayoutMock))));
-            DAWN_ASSERT_AND_ASSIGN(bindGroupLayout, mDevice.CreateBindGroupLayout(&desc));
-            EXPECT_TRUE(bindGroupLayout->IsAlive());
-            EXPECT_TRUE(bindGroupLayout->IsCachedReference());
-        }
-
-        Ref<BufferBase> buffer;
-        {
-            BufferDescriptor desc = {};
-            EXPECT_CALL(mDevice, CreateBufferImpl).WillOnce(Return(ByMove(AcquireRef(bufferMock))));
-            DAWN_ASSERT_AND_ASSIGN(buffer, mDevice.CreateBuffer(&desc));
-            EXPECT_TRUE(buffer->IsAlive());
-        }
-
-        Ref<CommandBufferBase> commandBuffer;
-        {
-            CommandBufferDescriptor desc = {};
-            EXPECT_CALL(mDevice, CreateCommandBuffer)
-                .WillOnce(Return(ByMove(AcquireRef(commandBufferMock))));
-            DAWN_ASSERT_AND_ASSIGN(commandBuffer, mDevice.CreateCommandBuffer(nullptr, &desc));
-            EXPECT_TRUE(commandBuffer->IsAlive());
-        }
-
-        Ref<ComputePipelineBase> computePipeline;
-        {
-            // Compute pipelines usually set their hash values at construction, but the mock does
-            // not, so we set it here.
-            constexpr size_t hash = 0x12345;
-            computePipelineMock->SetContentHash(hash);
-            ON_CALL(*computePipelineMock, ComputeContentHash).WillByDefault(Return(hash));
-
-            // Compute pipelines are initialized during their creation via the device.
-            EXPECT_CALL(*computePipelineMock, Initialize).Times(1);
-
-            ComputePipelineDescriptor desc = {};
-            desc.layout = GetPipelineLayout().Get();
-            desc.compute.module = GetComputeShaderModule().Get();
-            EXPECT_CALL(mDevice, CreateUninitializedComputePipelineImpl)
-                .WillOnce(Return(ByMove(AcquireRef(computePipelineMock))));
-            DAWN_ASSERT_AND_ASSIGN(computePipeline, mDevice.CreateComputePipeline(&desc));
-            EXPECT_TRUE(computePipeline->IsAlive());
-            EXPECT_TRUE(computePipeline->IsCachedReference());
-        }
-
-        Ref<ExternalTextureBase> externalTexture;
-        {
-            ExternalTextureDescriptor desc = {};
-            EXPECT_CALL(mDevice, CreateExternalTextureImpl)
-                .WillOnce(Return(ByMove(AcquireRef(externalTextureMock))));
-            DAWN_ASSERT_AND_ASSIGN(externalTexture, mDevice.CreateExternalTextureImpl(&desc));
-            EXPECT_TRUE(externalTexture->IsAlive());
-        }
-
-        Ref<PipelineLayoutBase> pipelineLayout;
-        {
-            PipelineLayoutDescriptor desc = {};
-            EXPECT_CALL(mDevice, CreatePipelineLayoutImpl)
-                .WillOnce(Return(ByMove(AcquireRef(pipelineLayoutMock))));
-            DAWN_ASSERT_AND_ASSIGN(pipelineLayout, mDevice.CreatePipelineLayout(&desc));
-            EXPECT_TRUE(pipelineLayout->IsAlive());
-            EXPECT_TRUE(pipelineLayout->IsCachedReference());
-        }
-
-        Ref<QuerySetBase> querySet;
-        {
-            QuerySetDescriptor desc = {};
-            EXPECT_CALL(mDevice, CreateQuerySetImpl)
-                .WillOnce(Return(ByMove(AcquireRef(querySetMock))));
-            DAWN_ASSERT_AND_ASSIGN(querySet, mDevice.CreateQuerySet(&desc));
-            EXPECT_TRUE(querySet->IsAlive());
-        }
-
-        Ref<RenderPipelineBase> renderPipeline;
-        {
-            // Render pipelines usually set their hash values at construction, but the mock does
-            // not, so we set it here.
-            constexpr size_t hash = 0x12345;
-            renderPipelineMock->SetContentHash(hash);
-            ON_CALL(*renderPipelineMock, ComputeContentHash).WillByDefault(Return(hash));
-
-            // Render pipelines are initialized during their creation via the device.
-            EXPECT_CALL(*renderPipelineMock, Initialize).Times(1);
-
-            RenderPipelineDescriptor desc = {};
-            desc.layout = GetPipelineLayout().Get();
-            desc.vertex.module = GetVertexShaderModule().Get();
-            EXPECT_CALL(mDevice, CreateUninitializedRenderPipelineImpl)
-                .WillOnce(Return(ByMove(AcquireRef(renderPipelineMock))));
-            DAWN_ASSERT_AND_ASSIGN(renderPipeline, mDevice.CreateRenderPipeline(&desc));
-            EXPECT_TRUE(renderPipeline->IsAlive());
-            EXPECT_TRUE(renderPipeline->IsCachedReference());
-        }
-
-        Ref<SamplerBase> sampler;
-        {
-            SamplerDescriptor desc = {};
-            EXPECT_CALL(mDevice, CreateSamplerImpl)
-                .WillOnce(Return(ByMove(AcquireRef(samplerMock))));
-            DAWN_ASSERT_AND_ASSIGN(sampler, mDevice.CreateSampler(&desc));
-            EXPECT_TRUE(sampler->IsAlive());
-            EXPECT_TRUE(sampler->IsCachedReference());
-        }
-
-        Ref<ShaderModuleBase> shaderModule;
-        {
-            ShaderModuleWGSLDescriptor wgslDesc;
-            wgslDesc.source = R"(
-                @stage(compute) @workgroup_size(1) fn main() {
-                }
-            )";
-            ShaderModuleDescriptor desc = {};
-            desc.nextInChain = &wgslDesc;
-
-            EXPECT_CALL(mDevice, CreateShaderModuleImpl)
-                .WillOnce(Return(ByMove(AcquireRef(shaderModuleMock))));
-            DAWN_ASSERT_AND_ASSIGN(shaderModule, mDevice.CreateShaderModule(&desc));
-            EXPECT_TRUE(shaderModule->IsAlive());
-            EXPECT_TRUE(shaderModule->IsCachedReference());
-        }
-
-        Ref<SwapChainBase> swapChain;
-        {
-            SwapChainDescriptor desc = {};
-            EXPECT_CALL(mDevice, CreateSwapChainImpl(_))
-                .WillOnce(Return(ByMove(AcquireRef(swapChainMock))));
-            DAWN_ASSERT_AND_ASSIGN(swapChain, mDevice.CreateSwapChain(nullptr, &desc));
-            EXPECT_TRUE(swapChain->IsAlive());
-        }
-
-        Ref<TextureBase> texture;
-        {
-            TextureDescriptor desc = {};
-            EXPECT_CALL(mDevice, CreateTextureImpl)
-                .WillOnce(Return(ByMove(AcquireRef(textureMock))));
-            DAWN_ASSERT_AND_ASSIGN(texture, mDevice.CreateTexture(&desc));
-            EXPECT_TRUE(texture->IsAlive());
-        }
-
-        Ref<TextureViewBase> textureView;
-        {
-            TextureViewDescriptor desc = {};
-            EXPECT_CALL(mDevice, CreateTextureViewImpl)
-                .WillOnce(Return(ByMove(AcquireRef(textureViewMock))));
-            DAWN_ASSERT_AND_ASSIGN(textureView,
-                                   mDevice.CreateTextureView(GetTexture().Get(), &desc));
-            EXPECT_TRUE(textureView->IsAlive());
-        }
-
-        mDevice.DestroyObjects();
-        EXPECT_FALSE(bindGroup->IsAlive());
-        EXPECT_FALSE(bindGroupLayout->IsAlive());
-        EXPECT_FALSE(buffer->IsAlive());
-        EXPECT_FALSE(commandBuffer->IsAlive());
-        EXPECT_FALSE(computePipeline->IsAlive());
-        EXPECT_FALSE(externalTexture->IsAlive());
-        EXPECT_FALSE(pipelineLayout->IsAlive());
-        EXPECT_FALSE(querySet->IsAlive());
-        EXPECT_FALSE(renderPipeline->IsAlive());
-        EXPECT_FALSE(sampler->IsAlive());
-        EXPECT_FALSE(shaderModule->IsAlive());
-        EXPECT_FALSE(swapChain->IsAlive());
-        EXPECT_FALSE(texture->IsAlive());
-        EXPECT_FALSE(textureView->IsAlive());
-    }
-
-    static constexpr std::string_view kComputeShader = R"(
+static constexpr std::string_view kComputeShader = R"(
         @stage(compute) @workgroup_size(1) fn main() {}
     )";
 
-    static constexpr std::string_view kVertexShader = R"(
+static constexpr std::string_view kVertexShader = R"(
         @stage(vertex) fn main() -> @builtin(position) vec4<f32> {
             return vec4<f32>(0.0, 0.0, 0.0, 0.0);
         }
     )";
 
-    static constexpr std::string_view kFragmentShader = R"(
+static constexpr std::string_view kFragmentShader = R"(
         @stage(fragment) fn main() {}
     )";
 
-    class DestroyObjectRegressionTests : public DawnNativeTest {};
+class DestroyObjectRegressionTests : public DawnNativeTest {};
 
-    // LastRefInCommand* tests are regression test(s) for https://crbug.com/chromium/1318792. The
-    // regression tests here are not exhuastive. In order to have an exhuastive test case for this
-    // class of failures, we should test every possible command with the commands holding the last
-    // references (or as last as possible) of their needed objects. For now, including simple cases
-    // including a stripped-down case from the original bug.
+// LastRefInCommand* tests are regression test(s) for https://crbug.com/chromium/1318792. The
+// regression tests here are not exhaustive. In order to have an exhaustive test case for this
+// class of failures, we should test every possible command with the commands holding the last
+// references (or as last as possible) of their needed objects. For now, including simple cases
+// including a stripped-down case from the original bug.
 
-    // Tests that when a RenderPipeline's last reference is held in a command in an unfinished
-    // CommandEncoder, that destroying the device still works as expected (and does not cause
-    // double-free).
-    TEST_F(DestroyObjectRegressionTests, LastRefInCommandRenderPipeline) {
-        utils::BasicRenderPass pass = utils::CreateBasicRenderPass(device, 1, 1);
+// Tests that when a RenderPipeline's last reference is held in a command in an unfinished
+// CommandEncoder, that destroying the device still works as expected (and does not cause
+// double-free).
+TEST_F(DestroyObjectRegressionTests, LastRefInCommandRenderPipeline) {
+    utils::BasicRenderPass pass = utils::CreateBasicRenderPass(device, 1, 1);
 
-        utils::ComboRenderPassDescriptor passDesc{};
-        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-        wgpu::RenderPassEncoder renderEncoder = encoder.BeginRenderPass(&pass.renderPassInfo);
+    utils::ComboRenderPassDescriptor passDesc{};
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+    wgpu::RenderPassEncoder renderEncoder = encoder.BeginRenderPass(&pass.renderPassInfo);
 
-        utils::ComboRenderPipelineDescriptor pipelineDesc;
-        pipelineDesc.cTargets[0].writeMask = wgpu::ColorWriteMask::None;
-        pipelineDesc.vertex.module = utils::CreateShaderModule(device, kVertexShader.data());
-        pipelineDesc.vertex.entryPoint = "main";
-        pipelineDesc.cFragment.module = utils::CreateShaderModule(device, kFragmentShader.data());
-        pipelineDesc.cFragment.entryPoint = "main";
-        renderEncoder.SetPipeline(device.CreateRenderPipeline(&pipelineDesc));
+    utils::ComboRenderPipelineDescriptor pipelineDesc;
+    pipelineDesc.cTargets[0].writeMask = wgpu::ColorWriteMask::None;
+    pipelineDesc.vertex.module = utils::CreateShaderModule(device, kVertexShader.data());
+    pipelineDesc.vertex.entryPoint = "main";
+    pipelineDesc.cFragment.module = utils::CreateShaderModule(device, kFragmentShader.data());
+    pipelineDesc.cFragment.entryPoint = "main";
+    renderEncoder.SetPipeline(device.CreateRenderPipeline(&pipelineDesc));
 
-        device.Destroy();
-    }
+    device.Destroy();
+}
 
-    // Tests that when a ComputePipelines's last reference is held in a command in an unfinished
-    // CommandEncoder, that destroying the device still works as expected (and does not cause
-    // double-free).
-    TEST_F(DestroyObjectRegressionTests, LastRefInCommandComputePipeline) {
-        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-        wgpu::ComputePassEncoder computeEncoder = encoder.BeginComputePass();
+// Tests that when a ComputePipeline's last reference is held in a command in an unfinished
+// CommandEncoder, that destroying the device still works as expected (and does not cause
+// double-free).
+TEST_F(DestroyObjectRegressionTests, LastRefInCommandComputePipeline) {
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+    wgpu::ComputePassEncoder computeEncoder = encoder.BeginComputePass();
 
-        wgpu::ComputePipelineDescriptor pipelineDesc;
-        pipelineDesc.compute.module = utils::CreateShaderModule(device, kComputeShader.data());
-        pipelineDesc.compute.entryPoint = "main";
-        computeEncoder.SetPipeline(device.CreateComputePipeline(&pipelineDesc));
+    wgpu::ComputePipelineDescriptor pipelineDesc;
+    pipelineDesc.compute.module = utils::CreateShaderModule(device, kComputeShader.data());
+    pipelineDesc.compute.entryPoint = "main";
+    computeEncoder.SetPipeline(device.CreateComputePipeline(&pipelineDesc));
 
-        device.Destroy();
-    }
+    device.Destroy();
+}
 
-    // TODO(https://crbug.com/dawn/1381) Remove when namespaces are not indented.
-    // NOLINTNEXTLINE(readability/namespace)
-}}  // namespace dawn::native::
+// TODO(https://crbug.com/dawn/1381) Remove when namespaces are not indented.
+// NOLINTNEXTLINE(readability/namespace)
+}  // namespace
+}  // namespace dawn::native
diff --git a/src/dawn/tests/unittests/native/DeviceCreationTests.cpp b/src/dawn/tests/unittests/native/DeviceCreationTests.cpp
index f7553ec..09fe994 100644
--- a/src/dawn/tests/unittests/native/DeviceCreationTests.cpp
+++ b/src/dawn/tests/unittests/native/DeviceCreationTests.cpp
@@ -25,166 +25,166 @@
 
 namespace {
 
-    using testing::Contains;
-    using testing::MockCallback;
-    using testing::NotNull;
-    using testing::SaveArg;
-    using testing::StrEq;
+using testing::Contains;
+using testing::MockCallback;
+using testing::NotNull;
+using testing::SaveArg;
+using testing::StrEq;
 
-    class DeviceCreationTest : public testing::Test {
-      protected:
-        void SetUp() override {
-            dawnProcSetProcs(&dawn::native::GetProcs());
+class DeviceCreationTest : public testing::Test {
+  protected:
+    void SetUp() override {
+        dawnProcSetProcs(&dawn::native::GetProcs());
 
-            instance = std::make_unique<dawn::native::Instance>();
-            instance->DiscoverDefaultAdapters();
-            for (dawn::native::Adapter& nativeAdapter : instance->GetAdapters()) {
-                wgpu::AdapterProperties properties;
-                nativeAdapter.GetProperties(&properties);
+        instance = std::make_unique<dawn::native::Instance>();
+        instance->DiscoverDefaultAdapters();
+        for (dawn::native::Adapter& nativeAdapter : instance->GetAdapters()) {
+            wgpu::AdapterProperties properties;
+            nativeAdapter.GetProperties(&properties);
 
-                if (properties.backendType == wgpu::BackendType::Null) {
-                    adapter = wgpu::Adapter(nativeAdapter.Get());
-                    break;
-                }
+            if (properties.backendType == wgpu::BackendType::Null) {
+                adapter = wgpu::Adapter(nativeAdapter.Get());
+                break;
             }
-            ASSERT_NE(adapter, nullptr);
         }
-
-        void TearDown() override {
-            adapter = nullptr;
-            instance = nullptr;
-            dawnProcSetProcs(nullptr);
-        }
-
-        std::unique_ptr<dawn::native::Instance> instance;
-        wgpu::Adapter adapter;
-    };
-
-    // Test successful call to CreateDevice with no descriptor
-    TEST_F(DeviceCreationTest, CreateDeviceNoDescriptorSuccess) {
-        wgpu::Device device = adapter.CreateDevice();
-        EXPECT_NE(device, nullptr);
+        ASSERT_NE(adapter, nullptr);
     }
 
-    // Test successful call to CreateDevice with descriptor.
-    TEST_F(DeviceCreationTest, CreateDeviceSuccess) {
+    void TearDown() override {
+        adapter = nullptr;
+        instance = nullptr;
+        dawnProcSetProcs(nullptr);
+    }
+
+    std::unique_ptr<dawn::native::Instance> instance;
+    wgpu::Adapter adapter;
+};
+
+// Test successful call to CreateDevice with no descriptor
+TEST_F(DeviceCreationTest, CreateDeviceNoDescriptorSuccess) {
+    wgpu::Device device = adapter.CreateDevice();
+    EXPECT_NE(device, nullptr);
+}
+
+// Test successful call to CreateDevice with descriptor.
+TEST_F(DeviceCreationTest, CreateDeviceSuccess) {
+    wgpu::DeviceDescriptor desc = {};
+    wgpu::Device device = adapter.CreateDevice(&desc);
+    EXPECT_NE(device, nullptr);
+}
+
+// Test successful call to CreateDevice with toggle descriptor.
+TEST_F(DeviceCreationTest, CreateDeviceWithTogglesSuccess) {
+    wgpu::DeviceDescriptor desc = {};
+    wgpu::DawnTogglesDeviceDescriptor togglesDesc = {};
+    desc.nextInChain = &togglesDesc;
+
+    const char* toggle = "skip_validation";
+    togglesDesc.forceEnabledToggles = &toggle;
+    togglesDesc.forceEnabledTogglesCount = 1;
+
+    wgpu::Device device = adapter.CreateDevice(&desc);
+    EXPECT_NE(device, nullptr);
+
+    auto toggles = dawn::native::GetTogglesUsed(device.Get());
+    EXPECT_THAT(toggles, Contains(StrEq(toggle)));
+}
+
+TEST_F(DeviceCreationTest, CreateDeviceWithCacheSuccess) {
+    // Default device descriptor should have the same cache key as a device descriptor with a
+    // default cache descriptor.
+    {
         wgpu::DeviceDescriptor desc = {};
-        wgpu::Device device = adapter.CreateDevice(&desc);
-        EXPECT_NE(device, nullptr);
-    }
+        wgpu::Device device1 = adapter.CreateDevice(&desc);
+        EXPECT_NE(device1, nullptr);
 
-    // Test successful call to CreateDevice with toggle descriptor.
-    TEST_F(DeviceCreationTest, CreateDeviceWithTogglesSuccess) {
+        wgpu::DawnCacheDeviceDescriptor cacheDesc = {};
+        desc.nextInChain = &cacheDesc;
+        wgpu::Device device2 = adapter.CreateDevice(&desc);
+
+        EXPECT_EQ(dawn::native::FromAPI(device1.Get())->GetCacheKey(),
+                  dawn::native::FromAPI(device2.Get())->GetCacheKey());
+    }
+    // Default device descriptor should not have the same cache key as a device descriptor with
+    // a non-default cache descriptor.
+    {
         wgpu::DeviceDescriptor desc = {};
-        wgpu::DawnTogglesDeviceDescriptor togglesDesc = {};
-        desc.nextInChain = &togglesDesc;
+        wgpu::Device device1 = adapter.CreateDevice(&desc);
+        EXPECT_NE(device1, nullptr);
 
-        const char* toggle = "skip_validation";
-        togglesDesc.forceEnabledToggles = &toggle;
-        togglesDesc.forceEnabledTogglesCount = 1;
+        wgpu::DawnCacheDeviceDescriptor cacheDesc = {};
+        desc.nextInChain = &cacheDesc;
+        const char* isolationKey = "isolation key";
+        cacheDesc.isolationKey = isolationKey;
+        wgpu::Device device2 = adapter.CreateDevice(&desc);
+        EXPECT_NE(device2, nullptr);
 
-        wgpu::Device device = adapter.CreateDevice(&desc);
-        EXPECT_NE(device, nullptr);
-
-        auto toggles = dawn::native::GetTogglesUsed(device.Get());
-        EXPECT_THAT(toggles, Contains(StrEq(toggle)));
+        EXPECT_NE(dawn::native::FromAPI(device1.Get())->GetCacheKey(),
+                  dawn::native::FromAPI(device2.Get())->GetCacheKey());
     }
+    // Two non-default cache descriptors should not have the same cache key.
+    {
+        wgpu::DawnCacheDeviceDescriptor cacheDesc = {};
+        const char* isolationKey1 = "isolation key 1";
+        const char* isolationKey2 = "isolation key 2";
+        wgpu::DeviceDescriptor desc = {};
+        desc.nextInChain = &cacheDesc;
 
-    TEST_F(DeviceCreationTest, CreateDeviceWithCacheSuccess) {
-        // Default device descriptor should have the same cache key as a device descriptor with a
-        // default cache descriptor.
-        {
-            wgpu::DeviceDescriptor desc = {};
-            wgpu::Device device1 = adapter.CreateDevice(&desc);
-            EXPECT_NE(device1, nullptr);
+        cacheDesc.isolationKey = isolationKey1;
+        wgpu::Device device1 = adapter.CreateDevice(&desc);
+        EXPECT_NE(device1, nullptr);
 
-            wgpu::DawnCacheDeviceDescriptor cacheDesc = {};
-            desc.nextInChain = &cacheDesc;
-            wgpu::Device device2 = adapter.CreateDevice(&desc);
+        cacheDesc.isolationKey = isolationKey2;
+        wgpu::Device device2 = adapter.CreateDevice(&desc);
+        EXPECT_NE(device2, nullptr);
 
-            EXPECT_EQ(dawn::native::FromAPI(device1.Get())->GetCacheKey(),
-                      dawn::native::FromAPI(device2.Get())->GetCacheKey());
-        }
-        // Default device descriptor should not have the same cache key as a device descriptor with
-        // a non-default cache descriptor.
-        {
-            wgpu::DeviceDescriptor desc = {};
-            wgpu::Device device1 = adapter.CreateDevice(&desc);
-            EXPECT_NE(device1, nullptr);
-
-            wgpu::DawnCacheDeviceDescriptor cacheDesc = {};
-            desc.nextInChain = &cacheDesc;
-            const char* isolationKey = "isolation key";
-            cacheDesc.isolationKey = isolationKey;
-            wgpu::Device device2 = adapter.CreateDevice(&desc);
-            EXPECT_NE(device2, nullptr);
-
-            EXPECT_NE(dawn::native::FromAPI(device1.Get())->GetCacheKey(),
-                      dawn::native::FromAPI(device2.Get())->GetCacheKey());
-        }
-        // Two non-default cache descriptors should not have the same cache key.
-        {
-            wgpu::DawnCacheDeviceDescriptor cacheDesc = {};
-            const char* isolationKey1 = "isolation key 1";
-            const char* isolationKey2 = "isolation key 2";
-            wgpu::DeviceDescriptor desc = {};
-            desc.nextInChain = &cacheDesc;
-
-            cacheDesc.isolationKey = isolationKey1;
-            wgpu::Device device1 = adapter.CreateDevice(&desc);
-            EXPECT_NE(device1, nullptr);
-
-            cacheDesc.isolationKey = isolationKey2;
-            wgpu::Device device2 = adapter.CreateDevice(&desc);
-            EXPECT_NE(device2, nullptr);
-
-            EXPECT_NE(dawn::native::FromAPI(device1.Get())->GetCacheKey(),
-                      dawn::native::FromAPI(device2.Get())->GetCacheKey());
-        }
+        EXPECT_NE(dawn::native::FromAPI(device1.Get())->GetCacheKey(),
+                  dawn::native::FromAPI(device2.Get())->GetCacheKey());
     }
+}
 
-    // Test successful call to RequestDevice with descriptor
-    TEST_F(DeviceCreationTest, RequestDeviceSuccess) {
-        WGPUDevice cDevice;
-        {
-            MockCallback<WGPURequestDeviceCallback> cb;
-            EXPECT_CALL(cb, Call(WGPURequestDeviceStatus_Success, NotNull(), nullptr, this))
-                .WillOnce(SaveArg<1>(&cDevice));
-
-            wgpu::DeviceDescriptor desc = {};
-            adapter.RequestDevice(&desc, cb.Callback(), cb.MakeUserdata(this));
-        }
-
-        wgpu::Device device = wgpu::Device::Acquire(cDevice);
-        EXPECT_NE(device, nullptr);
-    }
-
-    // Test successful call to RequestDevice with a null descriptor
-    TEST_F(DeviceCreationTest, RequestDeviceNullDescriptorSuccess) {
-        WGPUDevice cDevice;
-        {
-            MockCallback<WGPURequestDeviceCallback> cb;
-            EXPECT_CALL(cb, Call(WGPURequestDeviceStatus_Success, NotNull(), nullptr, this))
-                .WillOnce(SaveArg<1>(&cDevice));
-
-            adapter.RequestDevice(nullptr, cb.Callback(), cb.MakeUserdata(this));
-        }
-
-        wgpu::Device device = wgpu::Device::Acquire(cDevice);
-        EXPECT_NE(device, nullptr);
-    }
-
-    // Test failing call to RequestDevice with invalid feature
-    TEST_F(DeviceCreationTest, RequestDeviceFailure) {
+// Test successful call to RequestDevice with descriptor
+TEST_F(DeviceCreationTest, RequestDeviceSuccess) {
+    WGPUDevice cDevice;
+    {
         MockCallback<WGPURequestDeviceCallback> cb;
-        EXPECT_CALL(cb, Call(WGPURequestDeviceStatus_Error, nullptr, NotNull(), this)).Times(1);
+        EXPECT_CALL(cb, Call(WGPURequestDeviceStatus_Success, NotNull(), nullptr, this))
+            .WillOnce(SaveArg<1>(&cDevice));
 
         wgpu::DeviceDescriptor desc = {};
-        wgpu::FeatureName invalidFeature = static_cast<wgpu::FeatureName>(WGPUFeatureName_Force32);
-        desc.requiredFeatures = &invalidFeature;
-        desc.requiredFeaturesCount = 1;
-
         adapter.RequestDevice(&desc, cb.Callback(), cb.MakeUserdata(this));
     }
 
+    wgpu::Device device = wgpu::Device::Acquire(cDevice);
+    EXPECT_NE(device, nullptr);
+}
+
+// Test successful call to RequestDevice with a null descriptor
+TEST_F(DeviceCreationTest, RequestDeviceNullDescriptorSuccess) {
+    WGPUDevice cDevice;
+    {
+        MockCallback<WGPURequestDeviceCallback> cb;
+        EXPECT_CALL(cb, Call(WGPURequestDeviceStatus_Success, NotNull(), nullptr, this))
+            .WillOnce(SaveArg<1>(&cDevice));
+
+        adapter.RequestDevice(nullptr, cb.Callback(), cb.MakeUserdata(this));
+    }
+
+    wgpu::Device device = wgpu::Device::Acquire(cDevice);
+    EXPECT_NE(device, nullptr);
+}
+
+// Test failing call to RequestDevice with invalid feature
+TEST_F(DeviceCreationTest, RequestDeviceFailure) {
+    MockCallback<WGPURequestDeviceCallback> cb;
+    EXPECT_CALL(cb, Call(WGPURequestDeviceStatus_Error, nullptr, NotNull(), this)).Times(1);
+
+    wgpu::DeviceDescriptor desc = {};
+    wgpu::FeatureName invalidFeature = static_cast<wgpu::FeatureName>(WGPUFeatureName_Force32);
+    desc.requiredFeatures = &invalidFeature;
+    desc.requiredFeaturesCount = 1;
+
+    adapter.RequestDevice(&desc, cb.Callback(), cb.MakeUserdata(this));
+}
+
 }  // anonymous namespace
diff --git a/src/dawn/tests/unittests/native/mocks/BindGroupLayoutMock.h b/src/dawn/tests/unittests/native/mocks/BindGroupLayoutMock.h
index a1ab605..7276889 100644
--- a/src/dawn/tests/unittests/native/mocks/BindGroupLayoutMock.h
+++ b/src/dawn/tests/unittests/native/mocks/BindGroupLayoutMock.h
@@ -22,17 +22,17 @@
 
 namespace dawn::native {
 
-    class BindGroupLayoutMock final : public BindGroupLayoutBase {
-      public:
-        explicit BindGroupLayoutMock(DeviceBase* device) : BindGroupLayoutBase(device) {
-            ON_CALL(*this, DestroyImpl).WillByDefault([this]() {
-                this->BindGroupLayoutBase::DestroyImpl();
-            });
-        }
-        ~BindGroupLayoutMock() override = default;
+class BindGroupLayoutMock final : public BindGroupLayoutBase {
+  public:
+    explicit BindGroupLayoutMock(DeviceBase* device) : BindGroupLayoutBase(device) {
+        ON_CALL(*this, DestroyImpl).WillByDefault([this]() {
+            this->BindGroupLayoutBase::DestroyImpl();
+        });
+    }
+    ~BindGroupLayoutMock() override = default;
 
-        MOCK_METHOD(void, DestroyImpl, (), (override));
-    };
+    MOCK_METHOD(void, DestroyImpl, (), (override));
+};
 
 }  // namespace dawn::native
 
diff --git a/src/dawn/tests/unittests/native/mocks/BindGroupMock.h b/src/dawn/tests/unittests/native/mocks/BindGroupMock.h
index 5661f2f..8f5ce34 100644
--- a/src/dawn/tests/unittests/native/mocks/BindGroupMock.h
+++ b/src/dawn/tests/unittests/native/mocks/BindGroupMock.h
@@ -22,17 +22,15 @@
 
 namespace dawn::native {
 
-    class BindGroupMock : public BindGroupBase {
-      public:
-        explicit BindGroupMock(DeviceBase* device) : BindGroupBase(device) {
-            ON_CALL(*this, DestroyImpl).WillByDefault([this]() {
-                this->BindGroupBase::DestroyImpl();
-            });
-        }
-        ~BindGroupMock() override = default;
+class BindGroupMock : public BindGroupBase {
+  public:
+    explicit BindGroupMock(DeviceBase* device) : BindGroupBase(device) {
+        ON_CALL(*this, DestroyImpl).WillByDefault([this]() { this->BindGroupBase::DestroyImpl(); });
+    }
+    ~BindGroupMock() override = default;
 
-        MOCK_METHOD(void, DestroyImpl, (), (override));
-    };
+    MOCK_METHOD(void, DestroyImpl, (), (override));
+};
 
 }  // namespace dawn::native
 
diff --git a/src/dawn/tests/unittests/native/mocks/BufferMock.h b/src/dawn/tests/unittests/native/mocks/BufferMock.h
index f44dd8b..d9d2211 100644
--- a/src/dawn/tests/unittests/native/mocks/BufferMock.h
+++ b/src/dawn/tests/unittests/native/mocks/BufferMock.h
@@ -22,27 +22,25 @@
 
 namespace dawn::native {
 
-    class BufferMock : public BufferBase {
-      public:
-        BufferMock(DeviceBase* device, BufferBase::BufferState state) : BufferBase(device, state) {
-            ON_CALL(*this, DestroyImpl).WillByDefault([this]() {
-                this->BufferBase::DestroyImpl();
-            });
-        }
-        ~BufferMock() override = default;
+class BufferMock : public BufferBase {
+  public:
+    BufferMock(DeviceBase* device, BufferBase::BufferState state) : BufferBase(device, state) {
+        ON_CALL(*this, DestroyImpl).WillByDefault([this]() { this->BufferBase::DestroyImpl(); });
+    }
+    ~BufferMock() override = default;
 
-        MOCK_METHOD(void, DestroyImpl, (), (override));
+    MOCK_METHOD(void, DestroyImpl, (), (override));
 
-        MOCK_METHOD(MaybeError, MapAtCreationImpl, (), (override));
-        MOCK_METHOD(MaybeError,
-                    MapAsyncImpl,
-                    (wgpu::MapMode mode, size_t offset, size_t size),
-                    (override));
-        MOCK_METHOD(void, UnmapImpl, (), (override));
-        MOCK_METHOD(void*, GetMappedPointerImpl, (), (override));
+    MOCK_METHOD(MaybeError, MapAtCreationImpl, (), (override));
+    MOCK_METHOD(MaybeError,
+                MapAsyncImpl,
+                (wgpu::MapMode mode, size_t offset, size_t size),
+                (override));
+    MOCK_METHOD(void, UnmapImpl, (), (override));
+    MOCK_METHOD(void*, GetMappedPointerImpl, (), (override));
 
-        MOCK_METHOD(bool, IsCPUWritableAtCreation, (), (const, override));
-    };
+    MOCK_METHOD(bool, IsCPUWritableAtCreation, (), (const, override));
+};
 
 }  // namespace dawn::native
 
diff --git a/src/dawn/tests/unittests/native/mocks/CommandBufferMock.h b/src/dawn/tests/unittests/native/mocks/CommandBufferMock.h
index 5e16e0e..01843e8 100644
--- a/src/dawn/tests/unittests/native/mocks/CommandBufferMock.h
+++ b/src/dawn/tests/unittests/native/mocks/CommandBufferMock.h
@@ -22,17 +22,17 @@
 
 namespace dawn::native {
 
-    class CommandBufferMock : public CommandBufferBase {
-      public:
-        explicit CommandBufferMock(DeviceBase* device) : CommandBufferBase(device) {
-            ON_CALL(*this, DestroyImpl).WillByDefault([this]() {
-                this->CommandBufferBase::DestroyImpl();
-            });
-        }
-        ~CommandBufferMock() override = default;
+class CommandBufferMock : public CommandBufferBase {
+  public:
+    explicit CommandBufferMock(DeviceBase* device) : CommandBufferBase(device) {
+        ON_CALL(*this, DestroyImpl).WillByDefault([this]() {
+            this->CommandBufferBase::DestroyImpl();
+        });
+    }
+    ~CommandBufferMock() override = default;
 
-        MOCK_METHOD(void, DestroyImpl, (), (override));
-    };
+    MOCK_METHOD(void, DestroyImpl, (), (override));
+};
 
 }  // namespace dawn::native
 
diff --git a/src/dawn/tests/unittests/native/mocks/ComputePipelineMock.h b/src/dawn/tests/unittests/native/mocks/ComputePipelineMock.h
index f4fd5a1..7f6a598 100644
--- a/src/dawn/tests/unittests/native/mocks/ComputePipelineMock.h
+++ b/src/dawn/tests/unittests/native/mocks/ComputePipelineMock.h
@@ -22,19 +22,19 @@
 
 namespace dawn::native {
 
-    class ComputePipelineMock : public ComputePipelineBase {
-      public:
-        explicit ComputePipelineMock(DeviceBase* device) : ComputePipelineBase(device) {
-            ON_CALL(*this, DestroyImpl).WillByDefault([this]() {
-                this->ComputePipelineBase::DestroyImpl();
-            });
-        }
-        ~ComputePipelineMock() override = default;
+class ComputePipelineMock : public ComputePipelineBase {
+  public:
+    explicit ComputePipelineMock(DeviceBase* device) : ComputePipelineBase(device) {
+        ON_CALL(*this, DestroyImpl).WillByDefault([this]() {
+            this->ComputePipelineBase::DestroyImpl();
+        });
+    }
+    ~ComputePipelineMock() override = default;
 
-        MOCK_METHOD(MaybeError, Initialize, (), (override));
-        MOCK_METHOD(size_t, ComputeContentHash, (), (override));
-        MOCK_METHOD(void, DestroyImpl, (), (override));
-    };
+    MOCK_METHOD(MaybeError, Initialize, (), (override));
+    MOCK_METHOD(size_t, ComputeContentHash, (), (override));
+    MOCK_METHOD(void, DestroyImpl, (), (override));
+};
 
 }  // namespace dawn::native
 
diff --git a/src/dawn/tests/unittests/native/mocks/DeviceMock.h b/src/dawn/tests/unittests/native/mocks/DeviceMock.h
index ba54b2b..540a8f5 100644
--- a/src/dawn/tests/unittests/native/mocks/DeviceMock.h
+++ b/src/dawn/tests/unittests/native/mocks/DeviceMock.h
@@ -23,99 +23,98 @@
 
 namespace dawn::native {
 
-    class DeviceMock : public DeviceBase {
-      public:
-        // Exposes some protected functions for testing purposes.
-        using DeviceBase::DestroyObjects;
-        using DeviceBase::SetToggle;
+class DeviceMock : public DeviceBase {
+  public:
+    // Exposes some protected functions for testing purposes.
+    using DeviceBase::DestroyObjects;
+    using DeviceBase::SetToggle;
 
-        MOCK_METHOD(ResultOrError<Ref<CommandBufferBase>>,
-                    CreateCommandBuffer,
-                    (CommandEncoder*, const CommandBufferDescriptor*),
-                    (override));
+    MOCK_METHOD(ResultOrError<Ref<CommandBufferBase>>,
+                CreateCommandBuffer,
+                (CommandEncoder*, const CommandBufferDescriptor*),
+                (override));
 
-        MOCK_METHOD(ResultOrError<std::unique_ptr<StagingBufferBase>>,
-                    CreateStagingBuffer,
-                    (size_t),
-                    (override));
-        MOCK_METHOD(MaybeError,
-                    CopyFromStagingToBuffer,
-                    (StagingBufferBase*, uint64_t, BufferBase*, uint64_t, uint64_t),
-                    (override));
-        MOCK_METHOD(
-            MaybeError,
-            CopyFromStagingToTexture,
-            (const StagingBufferBase*, const TextureDataLayout&, TextureCopy*, const Extent3D&),
-            (override));
+    MOCK_METHOD(ResultOrError<std::unique_ptr<StagingBufferBase>>,
+                CreateStagingBuffer,
+                (size_t),
+                (override));
+    MOCK_METHOD(MaybeError,
+                CopyFromStagingToBuffer,
+                (StagingBufferBase*, uint64_t, BufferBase*, uint64_t, uint64_t),
+                (override));
+    MOCK_METHOD(MaybeError,
+                CopyFromStagingToTexture,
+                (const StagingBufferBase*, const TextureDataLayout&, TextureCopy*, const Extent3D&),
+                (override));
 
-        MOCK_METHOD(uint32_t, GetOptimalBytesPerRowAlignment, (), (const, override));
-        MOCK_METHOD(uint64_t, GetOptimalBufferToTextureCopyOffsetAlignment, (), (const, override));
+    MOCK_METHOD(uint32_t, GetOptimalBytesPerRowAlignment, (), (const, override));
+    MOCK_METHOD(uint64_t, GetOptimalBufferToTextureCopyOffsetAlignment, (), (const, override));
 
-        MOCK_METHOD(float, GetTimestampPeriodInNS, (), (const, override));
+    MOCK_METHOD(float, GetTimestampPeriodInNS, (), (const, override));
 
-        MOCK_METHOD(ResultOrError<Ref<BindGroupBase>>,
-                    CreateBindGroupImpl,
-                    (const BindGroupDescriptor*),
-                    (override));
-        MOCK_METHOD(ResultOrError<Ref<BindGroupLayoutBase>>,
-                    CreateBindGroupLayoutImpl,
-                    (const BindGroupLayoutDescriptor*, PipelineCompatibilityToken),
-                    (override));
-        MOCK_METHOD(ResultOrError<Ref<BufferBase>>,
-                    CreateBufferImpl,
-                    (const BufferDescriptor*),
-                    (override));
-        MOCK_METHOD(Ref<ComputePipelineBase>,
-                    CreateUninitializedComputePipelineImpl,
-                    (const ComputePipelineDescriptor*),
-                    (override));
-        MOCK_METHOD(ResultOrError<Ref<ExternalTextureBase>>,
-                    CreateExternalTextureImpl,
-                    (const ExternalTextureDescriptor*),
-                    (override));
-        MOCK_METHOD(ResultOrError<Ref<PipelineLayoutBase>>,
-                    CreatePipelineLayoutImpl,
-                    (const PipelineLayoutDescriptor*),
-                    (override));
-        MOCK_METHOD(ResultOrError<Ref<QuerySetBase>>,
-                    CreateQuerySetImpl,
-                    (const QuerySetDescriptor*),
-                    (override));
-        MOCK_METHOD(Ref<RenderPipelineBase>,
-                    CreateUninitializedRenderPipelineImpl,
-                    (const RenderPipelineDescriptor*),
-                    (override));
-        MOCK_METHOD(ResultOrError<Ref<SamplerBase>>,
-                    CreateSamplerImpl,
-                    (const SamplerDescriptor*),
-                    (override));
-        MOCK_METHOD(ResultOrError<Ref<ShaderModuleBase>>,
-                    CreateShaderModuleImpl,
-                    (const ShaderModuleDescriptor*, ShaderModuleParseResult*),
-                    (override));
-        MOCK_METHOD(ResultOrError<Ref<SwapChainBase>>,
-                    CreateSwapChainImpl,
-                    (const SwapChainDescriptor*),
-                    (override));
-        MOCK_METHOD(ResultOrError<Ref<NewSwapChainBase>>,
-                    CreateSwapChainImpl,
-                    (Surface*, NewSwapChainBase*, const SwapChainDescriptor*),
-                    (override));
-        MOCK_METHOD(ResultOrError<Ref<TextureBase>>,
-                    CreateTextureImpl,
-                    (const TextureDescriptor*),
-                    (override));
-        MOCK_METHOD(ResultOrError<Ref<TextureViewBase>>,
-                    CreateTextureViewImpl,
-                    (TextureBase*, const TextureViewDescriptor*),
-                    (override));
+    MOCK_METHOD(ResultOrError<Ref<BindGroupBase>>,
+                CreateBindGroupImpl,
+                (const BindGroupDescriptor*),
+                (override));
+    MOCK_METHOD(ResultOrError<Ref<BindGroupLayoutBase>>,
+                CreateBindGroupLayoutImpl,
+                (const BindGroupLayoutDescriptor*, PipelineCompatibilityToken),
+                (override));
+    MOCK_METHOD(ResultOrError<Ref<BufferBase>>,
+                CreateBufferImpl,
+                (const BufferDescriptor*),
+                (override));
+    MOCK_METHOD(Ref<ComputePipelineBase>,
+                CreateUninitializedComputePipelineImpl,
+                (const ComputePipelineDescriptor*),
+                (override));
+    MOCK_METHOD(ResultOrError<Ref<ExternalTextureBase>>,
+                CreateExternalTextureImpl,
+                (const ExternalTextureDescriptor*),
+                (override));
+    MOCK_METHOD(ResultOrError<Ref<PipelineLayoutBase>>,
+                CreatePipelineLayoutImpl,
+                (const PipelineLayoutDescriptor*),
+                (override));
+    MOCK_METHOD(ResultOrError<Ref<QuerySetBase>>,
+                CreateQuerySetImpl,
+                (const QuerySetDescriptor*),
+                (override));
+    MOCK_METHOD(Ref<RenderPipelineBase>,
+                CreateUninitializedRenderPipelineImpl,
+                (const RenderPipelineDescriptor*),
+                (override));
+    MOCK_METHOD(ResultOrError<Ref<SamplerBase>>,
+                CreateSamplerImpl,
+                (const SamplerDescriptor*),
+                (override));
+    MOCK_METHOD(ResultOrError<Ref<ShaderModuleBase>>,
+                CreateShaderModuleImpl,
+                (const ShaderModuleDescriptor*, ShaderModuleParseResult*),
+                (override));
+    MOCK_METHOD(ResultOrError<Ref<SwapChainBase>>,
+                CreateSwapChainImpl,
+                (const SwapChainDescriptor*),
+                (override));
+    MOCK_METHOD(ResultOrError<Ref<NewSwapChainBase>>,
+                CreateSwapChainImpl,
+                (Surface*, NewSwapChainBase*, const SwapChainDescriptor*),
+                (override));
+    MOCK_METHOD(ResultOrError<Ref<TextureBase>>,
+                CreateTextureImpl,
+                (const TextureDescriptor*),
+                (override));
+    MOCK_METHOD(ResultOrError<Ref<TextureViewBase>>,
+                CreateTextureViewImpl,
+                (TextureBase*, const TextureViewDescriptor*),
+                (override));
 
-        MOCK_METHOD(MaybeError, TickImpl, (), (override));
+    MOCK_METHOD(MaybeError, TickImpl, (), (override));
 
-        MOCK_METHOD(ResultOrError<ExecutionSerial>, CheckAndUpdateCompletedSerials, (), (override));
-        MOCK_METHOD(void, DestroyImpl, (), (override));
-        MOCK_METHOD(MaybeError, WaitForIdleForDestruction, (), (override));
-    };
+    MOCK_METHOD(ResultOrError<ExecutionSerial>, CheckAndUpdateCompletedSerials, (), (override));
+    MOCK_METHOD(void, DestroyImpl, (), (override));
+    MOCK_METHOD(MaybeError, WaitForIdleForDestruction, (), (override));
+};
 
 }  // namespace dawn::native
 
diff --git a/src/dawn/tests/unittests/native/mocks/ExternalTextureMock.h b/src/dawn/tests/unittests/native/mocks/ExternalTextureMock.h
index c5df066..4114fe8 100644
--- a/src/dawn/tests/unittests/native/mocks/ExternalTextureMock.h
+++ b/src/dawn/tests/unittests/native/mocks/ExternalTextureMock.h
@@ -22,17 +22,17 @@
 
 namespace dawn::native {
 
-    class ExternalTextureMock : public ExternalTextureBase {
-      public:
-        explicit ExternalTextureMock(DeviceBase* device) : ExternalTextureBase(device) {
-            ON_CALL(*this, DestroyImpl).WillByDefault([this]() {
-                this->ExternalTextureBase::DestroyImpl();
-            });
-        }
-        ~ExternalTextureMock() override = default;
+class ExternalTextureMock : public ExternalTextureBase {
+  public:
+    explicit ExternalTextureMock(DeviceBase* device) : ExternalTextureBase(device) {
+        ON_CALL(*this, DestroyImpl).WillByDefault([this]() {
+            this->ExternalTextureBase::DestroyImpl();
+        });
+    }
+    ~ExternalTextureMock() override = default;
 
-        MOCK_METHOD(void, DestroyImpl, (), (override));
-    };
+    MOCK_METHOD(void, DestroyImpl, (), (override));
+};
 
 }  // namespace dawn::native
 
diff --git a/src/dawn/tests/unittests/native/mocks/PipelineLayoutMock.h b/src/dawn/tests/unittests/native/mocks/PipelineLayoutMock.h
index 090bee6..754cc95 100644
--- a/src/dawn/tests/unittests/native/mocks/PipelineLayoutMock.h
+++ b/src/dawn/tests/unittests/native/mocks/PipelineLayoutMock.h
@@ -22,17 +22,17 @@
 
 namespace dawn::native {
 
-    class PipelineLayoutMock : public PipelineLayoutBase {
-      public:
-        explicit PipelineLayoutMock(DeviceBase* device) : PipelineLayoutBase(device) {
-            ON_CALL(*this, DestroyImpl).WillByDefault([this]() {
-                this->PipelineLayoutBase::DestroyImpl();
-            });
-        }
-        ~PipelineLayoutMock() override = default;
+class PipelineLayoutMock : public PipelineLayoutBase {
+  public:
+    explicit PipelineLayoutMock(DeviceBase* device) : PipelineLayoutBase(device) {
+        ON_CALL(*this, DestroyImpl).WillByDefault([this]() {
+            this->PipelineLayoutBase::DestroyImpl();
+        });
+    }
+    ~PipelineLayoutMock() override = default;
 
-        MOCK_METHOD(void, DestroyImpl, (), (override));
-    };
+    MOCK_METHOD(void, DestroyImpl, (), (override));
+};
 
 }  // namespace dawn::native
 
diff --git a/src/dawn/tests/unittests/native/mocks/QuerySetMock.h b/src/dawn/tests/unittests/native/mocks/QuerySetMock.h
index 65c5726..0d081b7 100644
--- a/src/dawn/tests/unittests/native/mocks/QuerySetMock.h
+++ b/src/dawn/tests/unittests/native/mocks/QuerySetMock.h
@@ -22,17 +22,15 @@
 
 namespace dawn::native {
 
-    class QuerySetMock : public QuerySetBase {
-      public:
-        explicit QuerySetMock(DeviceBase* device) : QuerySetBase(device) {
-            ON_CALL(*this, DestroyImpl).WillByDefault([this]() {
-                this->QuerySetBase::DestroyImpl();
-            });
-        }
-        ~QuerySetMock() override = default;
+class QuerySetMock : public QuerySetBase {
+  public:
+    explicit QuerySetMock(DeviceBase* device) : QuerySetBase(device) {
+        ON_CALL(*this, DestroyImpl).WillByDefault([this]() { this->QuerySetBase::DestroyImpl(); });
+    }
+    ~QuerySetMock() override = default;
 
-        MOCK_METHOD(void, DestroyImpl, (), (override));
-    };
+    MOCK_METHOD(void, DestroyImpl, (), (override));
+};
 
 }  // namespace dawn::native
 
diff --git a/src/dawn/tests/unittests/native/mocks/RenderPipelineMock.h b/src/dawn/tests/unittests/native/mocks/RenderPipelineMock.h
index 0aaa5e2..a7b0b62 100644
--- a/src/dawn/tests/unittests/native/mocks/RenderPipelineMock.h
+++ b/src/dawn/tests/unittests/native/mocks/RenderPipelineMock.h
@@ -22,19 +22,19 @@
 
 namespace dawn::native {
 
-    class RenderPipelineMock : public RenderPipelineBase {
-      public:
-        explicit RenderPipelineMock(DeviceBase* device) : RenderPipelineBase(device) {
-            ON_CALL(*this, DestroyImpl).WillByDefault([this]() {
-                this->RenderPipelineBase::DestroyImpl();
-            });
-        }
-        ~RenderPipelineMock() override = default;
+class RenderPipelineMock : public RenderPipelineBase {
+  public:
+    explicit RenderPipelineMock(DeviceBase* device) : RenderPipelineBase(device) {
+        ON_CALL(*this, DestroyImpl).WillByDefault([this]() {
+            this->RenderPipelineBase::DestroyImpl();
+        });
+    }
+    ~RenderPipelineMock() override = default;
 
-        MOCK_METHOD(MaybeError, Initialize, (), (override));
-        MOCK_METHOD(size_t, ComputeContentHash, (), (override));
-        MOCK_METHOD(void, DestroyImpl, (), (override));
-    };
+    MOCK_METHOD(MaybeError, Initialize, (), (override));
+    MOCK_METHOD(size_t, ComputeContentHash, (), (override));
+    MOCK_METHOD(void, DestroyImpl, (), (override));
+};
 
 }  // namespace dawn::native
 
diff --git a/src/dawn/tests/unittests/native/mocks/SamplerMock.h b/src/dawn/tests/unittests/native/mocks/SamplerMock.h
index 2427075..7e75255 100644
--- a/src/dawn/tests/unittests/native/mocks/SamplerMock.h
+++ b/src/dawn/tests/unittests/native/mocks/SamplerMock.h
@@ -22,17 +22,15 @@
 
 namespace dawn::native {
 
-    class SamplerMock : public SamplerBase {
-      public:
-        explicit SamplerMock(DeviceBase* device) : SamplerBase(device) {
-            ON_CALL(*this, DestroyImpl).WillByDefault([this]() {
-                this->SamplerBase::DestroyImpl();
-            });
-        }
-        ~SamplerMock() override = default;
+class SamplerMock : public SamplerBase {
+  public:
+    explicit SamplerMock(DeviceBase* device) : SamplerBase(device) {
+        ON_CALL(*this, DestroyImpl).WillByDefault([this]() { this->SamplerBase::DestroyImpl(); });
+    }
+    ~SamplerMock() override = default;
 
-        MOCK_METHOD(void, DestroyImpl, (), (override));
-    };
+    MOCK_METHOD(void, DestroyImpl, (), (override));
+};
 
 }  // namespace dawn::native
 
diff --git a/src/dawn/tests/unittests/native/mocks/ShaderModuleMock.cpp b/src/dawn/tests/unittests/native/mocks/ShaderModuleMock.cpp
index 710737c..d497b75 100644
--- a/src/dawn/tests/unittests/native/mocks/ShaderModuleMock.cpp
+++ b/src/dawn/tests/unittests/native/mocks/ShaderModuleMock.cpp
@@ -16,25 +16,23 @@
 
 namespace dawn::native {
 
-    ShaderModuleMock::ShaderModuleMock(DeviceBase* device) : ShaderModuleBase(device) {
-        ON_CALL(*this, DestroyImpl).WillByDefault([this]() {
-            this->ShaderModuleBase::DestroyImpl();
-        });
-    }
+ShaderModuleMock::ShaderModuleMock(DeviceBase* device) : ShaderModuleBase(device) {
+    ON_CALL(*this, DestroyImpl).WillByDefault([this]() { this->ShaderModuleBase::DestroyImpl(); });
+}
 
-    ResultOrError<Ref<ShaderModuleMock>> ShaderModuleMock::Create(DeviceBase* device,
-                                                                  const char* source) {
-        ShaderModuleMock* mock = new ShaderModuleMock(device);
+ResultOrError<Ref<ShaderModuleMock>> ShaderModuleMock::Create(DeviceBase* device,
+                                                              const char* source) {
+    ShaderModuleMock* mock = new ShaderModuleMock(device);
 
-        ShaderModuleWGSLDescriptor wgslDesc;
-        wgslDesc.source = source;
-        ShaderModuleDescriptor desc;
-        desc.nextInChain = &wgslDesc;
+    ShaderModuleWGSLDescriptor wgslDesc;
+    wgslDesc.source = source;
+    ShaderModuleDescriptor desc;
+    desc.nextInChain = &wgslDesc;
 
-        ShaderModuleParseResult parseResult;
-        DAWN_TRY(ValidateShaderModuleDescriptor(device, &desc, &parseResult, nullptr));
-        DAWN_TRY(mock->InitializeBase(&parseResult));
-        return AcquireRef(mock);
-    }
+    ShaderModuleParseResult parseResult;
+    DAWN_TRY(ValidateShaderModuleDescriptor(device, &desc, &parseResult, nullptr));
+    DAWN_TRY(mock->InitializeBase(&parseResult));
+    return AcquireRef(mock);
+}
 
 }  // namespace dawn::native
diff --git a/src/dawn/tests/unittests/native/mocks/ShaderModuleMock.h b/src/dawn/tests/unittests/native/mocks/ShaderModuleMock.h
index 5857a6e5..d35a319 100644
--- a/src/dawn/tests/unittests/native/mocks/ShaderModuleMock.h
+++ b/src/dawn/tests/unittests/native/mocks/ShaderModuleMock.h
@@ -25,16 +25,16 @@
 
 namespace dawn::native {
 
-    class ShaderModuleMock : public ShaderModuleBase {
-      public:
-        explicit ShaderModuleMock(DeviceBase* device);
-        ~ShaderModuleMock() override = default;
+class ShaderModuleMock : public ShaderModuleBase {
+  public:
+    explicit ShaderModuleMock(DeviceBase* device);
+    ~ShaderModuleMock() override = default;
 
-        MOCK_METHOD(void, DestroyImpl, (), (override));
+    MOCK_METHOD(void, DestroyImpl, (), (override));
 
-        // Creates a shader module mock based on the wgsl source.
-        static ResultOrError<Ref<ShaderModuleMock>> Create(DeviceBase* device, const char* source);
-    };
+    // Creates a shader module mock based on the wgsl source.
+    static ResultOrError<Ref<ShaderModuleMock>> Create(DeviceBase* device, const char* source);
+};
 
 }  // namespace dawn::native
 
diff --git a/src/dawn/tests/unittests/native/mocks/SwapChainMock.h b/src/dawn/tests/unittests/native/mocks/SwapChainMock.h
index c0aa249..8f3386c 100644
--- a/src/dawn/tests/unittests/native/mocks/SwapChainMock.h
+++ b/src/dawn/tests/unittests/native/mocks/SwapChainMock.h
@@ -22,24 +22,22 @@
 
 namespace dawn::native {
 
-    class SwapChainMock : public SwapChainBase {
-      public:
-        explicit SwapChainMock(DeviceBase* device) : SwapChainBase(device) {
-            ON_CALL(*this, DestroyImpl).WillByDefault([this]() {
-                this->SwapChainBase::DestroyImpl();
-            });
-        }
-        ~SwapChainMock() override = default;
+class SwapChainMock : public SwapChainBase {
+  public:
+    explicit SwapChainMock(DeviceBase* device) : SwapChainBase(device) {
+        ON_CALL(*this, DestroyImpl).WillByDefault([this]() { this->SwapChainBase::DestroyImpl(); });
+    }
+    ~SwapChainMock() override = default;
 
-        MOCK_METHOD(void, DestroyImpl, (), (override));
+    MOCK_METHOD(void, DestroyImpl, (), (override));
 
-        MOCK_METHOD(void,
-                    APIConfigure,
-                    (wgpu::TextureFormat, wgpu::TextureUsage, uint32_t, uint32_t),
-                    (override));
-        MOCK_METHOD(TextureViewBase*, APIGetCurrentTextureView, (), (override));
-        MOCK_METHOD(void, APIPresent, (), (override));
-    };
+    MOCK_METHOD(void,
+                APIConfigure,
+                (wgpu::TextureFormat, wgpu::TextureUsage, uint32_t, uint32_t),
+                (override));
+    MOCK_METHOD(TextureViewBase*, APIGetCurrentTextureView, (), (override));
+    MOCK_METHOD(void, APIPresent, (), (override));
+};
 
 }  // namespace dawn::native
 
diff --git a/src/dawn/tests/unittests/native/mocks/TextureMock.h b/src/dawn/tests/unittests/native/mocks/TextureMock.h
index a6ea6fb..b371b1a 100644
--- a/src/dawn/tests/unittests/native/mocks/TextureMock.h
+++ b/src/dawn/tests/unittests/native/mocks/TextureMock.h
@@ -22,27 +22,23 @@
 
 namespace dawn::native {
 
-    class TextureMock : public TextureBase {
-      public:
-        TextureMock(DeviceBase* device, TextureBase::TextureState state)
-            : TextureBase(device, state) {
-            ON_CALL(*this, DestroyImpl).WillByDefault([this]() {
-                this->TextureBase::DestroyImpl();
-            });
-        }
-        ~TextureMock() override = default;
+class TextureMock : public TextureBase {
+  public:
+    TextureMock(DeviceBase* device, TextureBase::TextureState state) : TextureBase(device, state) {
+        ON_CALL(*this, DestroyImpl).WillByDefault([this]() { this->TextureBase::DestroyImpl(); });
+    }
+    ~TextureMock() override = default;
 
-        MOCK_METHOD(void, DestroyImpl, (), (override));
-    };
+    MOCK_METHOD(void, DestroyImpl, (), (override));
+};
 
-    class TextureViewMock : public TextureViewBase {
-      public:
-        explicit TextureViewMock(TextureBase* texture) : TextureViewBase(texture) {
-        }
-        ~TextureViewMock() override = default;
+class TextureViewMock : public TextureViewBase {
+  public:
+    explicit TextureViewMock(TextureBase* texture) : TextureViewBase(texture) {}
+    ~TextureViewMock() override = default;
 
-        MOCK_METHOD(void, DestroyImpl, (), (override));
-    };
+    MOCK_METHOD(void, DestroyImpl, (), (override));
+};
 
 }  // namespace dawn::native
 
diff --git a/src/dawn/tests/unittests/validation/BufferValidationTests.cpp b/src/dawn/tests/unittests/validation/BufferValidationTests.cpp
index 1c0abff..a063c4c 100644
--- a/src/dawn/tests/unittests/validation/BufferValidationTests.cpp
+++ b/src/dawn/tests/unittests/validation/BufferValidationTests.cpp
@@ -15,8 +15,8 @@
 #include <limits>
 #include <memory>
 
-#include "gmock/gmock.h"
 #include "dawn/tests/unittests/validation/ValidationTest.h"
+#include "gmock/gmock.h"
 
 using testing::_;
 using testing::InvokeWithoutArgs;
diff --git a/src/dawn/tests/unittests/validation/DrawVertexAndIndexBufferOOBValidationTests.cpp b/src/dawn/tests/unittests/validation/DrawVertexAndIndexBufferOOBValidationTests.cpp
index 4876e92..e2f1906 100644
--- a/src/dawn/tests/unittests/validation/DrawVertexAndIndexBufferOOBValidationTests.cpp
+++ b/src/dawn/tests/unittests/validation/DrawVertexAndIndexBufferOOBValidationTests.cpp
@@ -20,729 +20,721 @@
 #include "dawn/utils/WGPUHelpers.h"
 
 namespace {
-    constexpr uint32_t kRTSize = 4;
-    constexpr uint32_t kFloat32x2Stride = 2 * sizeof(float);
-    constexpr uint32_t kFloat32x4Stride = 4 * sizeof(float);
+constexpr uint32_t kRTSize = 4;
+constexpr uint32_t kFloat32x2Stride = 2 * sizeof(float);
+constexpr uint32_t kFloat32x4Stride = 4 * sizeof(float);
 
-    class DrawVertexAndIndexBufferOOBValidationTests : public ValidationTest {
-      public:
-        // Parameters for testing index buffer
-        struct IndexBufferParams {
-            wgpu::IndexFormat indexFormat;
-            uint64_t indexBufferSize;              // Size for creating index buffer
-            uint64_t indexBufferOffsetForEncoder;  // Offset for SetIndexBuffer in encoder
-            uint64_t indexBufferSizeForEncoder;    // Size for SetIndexBuffer in encoder
-            uint32_t maxValidIndexNumber;  // max number of {indexCount + firstIndex} for this set
-                                           // of parameters
-        };
+class DrawVertexAndIndexBufferOOBValidationTests : public ValidationTest {
+  public:
+    // Parameters for testing index buffer
+    struct IndexBufferParams {
+        wgpu::IndexFormat indexFormat;
+        uint64_t indexBufferSize;              // Size for creating index buffer
+        uint64_t indexBufferOffsetForEncoder;  // Offset for SetIndexBuffer in encoder
+        uint64_t indexBufferSizeForEncoder;    // Size for SetIndexBuffer in encoder
+        uint32_t maxValidIndexNumber;  // max number of {indexCount + firstIndex} for this set
+                                       // of parameters
+    };
 
-        // Parameters for testing vertex-step-mode and instance-step-mode vertex buffer
-        struct VertexBufferParams {
-            uint32_t bufferStride;
-            uint64_t bufferSize;              // Size for creating vertex buffer
-            uint64_t bufferOffsetForEncoder;  // Offset for SetVertexBuffer in encoder
-            uint64_t bufferSizeForEncoder;    // Size for SetVertexBuffer in encoder
-            uint32_t maxValidAccessNumber;    // max number of valid access time for this set of
-                                              // parameters, i.e. {vertexCount + firstVertex} for
-            // vertex-step-mode, and {instanceCount + firstInstance}
-            // for instance-step-mode
-        };
+    // Parameters for testing vertex-step-mode and instance-step-mode vertex buffer
+    struct VertexBufferParams {
+        uint32_t bufferStride;
+        uint64_t bufferSize;              // Size for creating vertex buffer
+        uint64_t bufferOffsetForEncoder;  // Offset for SetVertexBuffer in encoder
+        uint64_t bufferSizeForEncoder;    // Size for SetVertexBuffer in encoder
+        uint32_t maxValidAccessNumber;    // max number of valid access time for this set of
+                                          // parameters, i.e. {vertexCount + firstVertex} for
+        // vertex-step-mode, and {instanceCount + firstInstance}
+        // for instance-step-mode
+    };
 
-        // Parameters for setIndexBuffer
-        struct IndexBufferDesc {
-            const wgpu::Buffer buffer;
-            wgpu::IndexFormat indexFormat;
-            uint64_t offset = 0;
-            uint64_t size = wgpu::kWholeSize;
-        };
+    // Parameters for setIndexBuffer
+    struct IndexBufferDesc {
+        const wgpu::Buffer buffer;
+        wgpu::IndexFormat indexFormat;
+        uint64_t offset = 0;
+        uint64_t size = wgpu::kWholeSize;
+    };
 
-        // Parameters for setVertexBuffer
-        struct VertexBufferSpec {
-            uint32_t slot;
-            const wgpu::Buffer buffer;
-            uint64_t offset = 0;
-            uint64_t size = wgpu::kWholeSize;
-        };
-        using VertexBufferList = std::vector<VertexBufferSpec>;
+    // Parameters for setVertexBuffer
+    struct VertexBufferSpec {
+        uint32_t slot;
+        const wgpu::Buffer buffer;
+        uint64_t offset = 0;
+        uint64_t size = wgpu::kWholeSize;
+    };
+    using VertexBufferList = std::vector<VertexBufferSpec>;
 
-        // Buffer layout parameters for creating pipeline
-        struct PipelineVertexBufferAttributeDesc {
-            uint32_t shaderLocation;
-            wgpu::VertexFormat format;
-            uint64_t offset = 0;
-        };
-        struct PipelineVertexBufferDesc {
-            uint64_t arrayStride;
-            wgpu::VertexStepMode stepMode;
-            std::vector<PipelineVertexBufferAttributeDesc> attributes = {};
-        };
+    // Buffer layout parameters for creating pipeline
+    struct PipelineVertexBufferAttributeDesc {
+        uint32_t shaderLocation;
+        wgpu::VertexFormat format;
+        uint64_t offset = 0;
+    };
+    struct PipelineVertexBufferDesc {
+        uint64_t arrayStride;
+        wgpu::VertexStepMode stepMode;
+        std::vector<PipelineVertexBufferAttributeDesc> attributes = {};
+    };
 
-        void SetUp() override {
-            ValidationTest::SetUp();
+    void SetUp() override {
+        ValidationTest::SetUp();
 
-            renderPass = utils::CreateBasicRenderPass(device, kRTSize, kRTSize);
+        renderPass = utils::CreateBasicRenderPass(device, kRTSize, kRTSize);
 
-            fsModule = utils::CreateShaderModule(device, R"(
+        fsModule = utils::CreateShaderModule(device, R"(
             @stage(fragment) fn main() -> @location(0) vec4<f32> {
                 return vec4<f32>(0.0, 1.0, 0.0, 1.0);
             })");
-        }
+    }
 
-        const wgpu::RenderPassDescriptor* GetBasicRenderPassDescriptor() const {
-            return &renderPass.renderPassInfo;
-        }
+    const wgpu::RenderPassDescriptor* GetBasicRenderPassDescriptor() const {
+        return &renderPass.renderPassInfo;
+    }
 
-        wgpu::Buffer CreateBuffer(uint64_t size,
-                                  wgpu::BufferUsage usage = wgpu::BufferUsage::Vertex) {
-            wgpu::BufferDescriptor descriptor;
-            descriptor.size = size;
-            descriptor.usage = usage;
+    wgpu::Buffer CreateBuffer(uint64_t size, wgpu::BufferUsage usage = wgpu::BufferUsage::Vertex) {
+        wgpu::BufferDescriptor descriptor;
+        descriptor.size = size;
+        descriptor.usage = usage;
 
-            return device.CreateBuffer(&descriptor);
-        }
+        return device.CreateBuffer(&descriptor);
+    }
 
-        wgpu::ShaderModule CreateVertexShaderModuleWithBuffer(
-            std::vector<PipelineVertexBufferDesc> bufferDescList) {
-            uint32_t attributeCount = 0;
-            std::stringstream inputStringStream;
+    wgpu::ShaderModule CreateVertexShaderModuleWithBuffer(
+        std::vector<PipelineVertexBufferDesc> bufferDescList) {
+        uint32_t attributeCount = 0;
+        std::stringstream inputStringStream;
 
-            for (auto buffer : bufferDescList) {
-                for (auto attr : buffer.attributes) {
-                    // @location({shaderLocation}) var_{id} : {typeString},
-                    inputStringStream << "@location(" << attr.shaderLocation << ") var_"
-                                      << attributeCount << " : vec4<f32>,";
-                    attributeCount++;
-                }
+        for (auto buffer : bufferDescList) {
+            for (auto attr : buffer.attributes) {
+                // @location({shaderLocation}) var_{id} : {typeString},
+                inputStringStream << "@location(" << attr.shaderLocation << ") var_"
+                                  << attributeCount << " : vec4<f32>,";
+                attributeCount++;
             }
+        }
 
-            std::stringstream shaderStringStream;
+        std::stringstream shaderStringStream;
 
-            shaderStringStream << R"(
+        shaderStringStream << R"(
             @stage(vertex)
             fn main()" << inputStringStream.str()
-                               << R"() -> @builtin(position) vec4<f32> {
+                           << R"() -> @builtin(position) vec4<f32> {
                 return vec4<f32>(0.0, 1.0, 0.0, 1.0);
             })";
 
-            return utils::CreateShaderModule(device, shaderStringStream.str().c_str());
-        }
+        return utils::CreateShaderModule(device, shaderStringStream.str().c_str());
+    }
 
-        // Create a render pipeline with given buffer layout description, using a vertex shader
-        // module automatically generated from the buffer description.
-        wgpu::RenderPipeline CreateRenderPipelineWithBufferDesc(
-            std::vector<PipelineVertexBufferDesc> bufferDescList) {
-            utils::ComboRenderPipelineDescriptor descriptor;
+    // Create a render pipeline with given buffer layout description, using a vertex shader
+    // module automatically generated from the buffer description.
+    wgpu::RenderPipeline CreateRenderPipelineWithBufferDesc(
+        std::vector<PipelineVertexBufferDesc> bufferDescList) {
+        utils::ComboRenderPipelineDescriptor descriptor;
 
-            descriptor.vertex.module = CreateVertexShaderModuleWithBuffer(bufferDescList);
-            descriptor.cFragment.module = fsModule;
-            descriptor.primitive.topology = wgpu::PrimitiveTopology::TriangleList;
+        descriptor.vertex.module = CreateVertexShaderModuleWithBuffer(bufferDescList);
+        descriptor.cFragment.module = fsModule;
+        descriptor.primitive.topology = wgpu::PrimitiveTopology::TriangleList;
 
-            descriptor.vertex.bufferCount = bufferDescList.size();
+        descriptor.vertex.bufferCount = bufferDescList.size();
 
-            size_t attributeCount = 0;
+        size_t attributeCount = 0;
 
-            for (size_t bufferCount = 0; bufferCount < bufferDescList.size(); bufferCount++) {
-                auto bufferDesc = bufferDescList[bufferCount];
-                descriptor.cBuffers[bufferCount].arrayStride = bufferDesc.arrayStride;
-                descriptor.cBuffers[bufferCount].stepMode = bufferDesc.stepMode;
-                if (bufferDesc.attributes.size() > 0) {
-                    descriptor.cBuffers[bufferCount].attributeCount = bufferDesc.attributes.size();
-                    descriptor.cBuffers[bufferCount].attributes =
-                        &descriptor.cAttributes[attributeCount];
-                    for (auto attribute : bufferDesc.attributes) {
-                        descriptor.cAttributes[attributeCount].shaderLocation =
-                            attribute.shaderLocation;
-                        descriptor.cAttributes[attributeCount].format = attribute.format;
-                        descriptor.cAttributes[attributeCount].offset = attribute.offset;
-                        attributeCount++;
-                    }
-                } else {
-                    descriptor.cBuffers[bufferCount].attributeCount = 0;
-                    descriptor.cBuffers[bufferCount].attributes = nullptr;
+        for (size_t bufferCount = 0; bufferCount < bufferDescList.size(); bufferCount++) {
+            auto bufferDesc = bufferDescList[bufferCount];
+            descriptor.cBuffers[bufferCount].arrayStride = bufferDesc.arrayStride;
+            descriptor.cBuffers[bufferCount].stepMode = bufferDesc.stepMode;
+            if (bufferDesc.attributes.size() > 0) {
+                descriptor.cBuffers[bufferCount].attributeCount = bufferDesc.attributes.size();
+                descriptor.cBuffers[bufferCount].attributes =
+                    &descriptor.cAttributes[attributeCount];
+                for (auto attribute : bufferDesc.attributes) {
+                    descriptor.cAttributes[attributeCount].shaderLocation =
+                        attribute.shaderLocation;
+                    descriptor.cAttributes[attributeCount].format = attribute.format;
+                    descriptor.cAttributes[attributeCount].offset = attribute.offset;
+                    attributeCount++;
                 }
-            }
-
-            descriptor.cTargets[0].format = renderPass.colorFormat;
-
-            return device.CreateRenderPipeline(&descriptor);
-        }
-
-        // Create a render pipeline using only one vertex-step-mode Float32x4 buffer
-        wgpu::RenderPipeline CreateBasicRenderPipeline(uint32_t bufferStride = kFloat32x4Stride) {
-            DAWN_ASSERT(bufferStride >= kFloat32x4Stride);
-
-            std::vector<PipelineVertexBufferDesc> bufferDescList = {
-                {bufferStride, wgpu::VertexStepMode::Vertex, {{0, wgpu::VertexFormat::Float32x4}}},
-            };
-
-            return CreateRenderPipelineWithBufferDesc(bufferDescList);
-        }
-
-        // Create a render pipeline using one vertex-step-mode Float32x4 buffer and one
-        // instance-step-mode Float32x2 buffer
-        wgpu::RenderPipeline CreateBasicRenderPipelineWithInstance(
-            uint32_t bufferStride1 = kFloat32x4Stride,
-            uint32_t bufferStride2 = kFloat32x2Stride) {
-            DAWN_ASSERT(bufferStride1 >= kFloat32x4Stride);
-            DAWN_ASSERT(bufferStride2 >= kFloat32x2Stride);
-
-            std::vector<PipelineVertexBufferDesc> bufferDescList = {
-                {bufferStride1, wgpu::VertexStepMode::Vertex, {{0, wgpu::VertexFormat::Float32x4}}},
-                {bufferStride2,
-                 wgpu::VertexStepMode::Instance,
-                 {{3, wgpu::VertexFormat::Float32x2}}},
-            };
-
-            return CreateRenderPipelineWithBufferDesc(bufferDescList);
-        }
-
-        // Create a render pipeline using one vertex-step-mode and one instance-step-mode buffer,
-        // both with a zero array stride. The minimal size of vertex step mode buffer should be 28,
-        // and the minimal size of instance step mode buffer should be 20.
-        wgpu::RenderPipeline CreateBasicRenderPipelineWithZeroArrayStride() {
-            std::vector<PipelineVertexBufferDesc> bufferDescList = {
-                {0,
-                 wgpu::VertexStepMode::Vertex,
-                 {{0, wgpu::VertexFormat::Float32x4, 0}, {1, wgpu::VertexFormat::Float32x2, 20}}},
-                {0,
-                 wgpu::VertexStepMode::Instance,
-                 // Two attributes are overlapped within this instance step mode vertex buffer
-                 {{3, wgpu::VertexFormat::Float32x4, 4}, {7, wgpu::VertexFormat::Float32x3, 0}}},
-            };
-
-            return CreateRenderPipelineWithBufferDesc(bufferDescList);
-        }
-
-        void TestRenderPassDraw(const wgpu::RenderPipeline& pipeline,
-                                VertexBufferList vertexBufferList,
-                                uint32_t vertexCount,
-                                uint32_t instanceCount,
-                                uint32_t firstVertex,
-                                uint32_t firstInstance,
-                                bool isSuccess) {
-            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-            wgpu::RenderPassEncoder renderPassEncoder =
-                encoder.BeginRenderPass(GetBasicRenderPassDescriptor());
-            renderPassEncoder.SetPipeline(pipeline);
-
-            for (auto vertexBufferParam : vertexBufferList) {
-                renderPassEncoder.SetVertexBuffer(vertexBufferParam.slot, vertexBufferParam.buffer,
-                                                  vertexBufferParam.offset, vertexBufferParam.size);
-            }
-            renderPassEncoder.Draw(vertexCount, instanceCount, firstVertex, firstInstance);
-            renderPassEncoder.End();
-
-            if (isSuccess) {
-                encoder.Finish();
             } else {
-                ASSERT_DEVICE_ERROR(encoder.Finish());
+                descriptor.cBuffers[bufferCount].attributeCount = 0;
+                descriptor.cBuffers[bufferCount].attributes = nullptr;
             }
         }
 
-        void TestRenderPassDrawIndexed(const wgpu::RenderPipeline& pipeline,
-                                       IndexBufferDesc indexBuffer,
-                                       VertexBufferList vertexBufferList,
-                                       uint32_t indexCount,
-                                       uint32_t instanceCount,
-                                       uint32_t firstIndex,
-                                       int32_t baseVertex,
-                                       uint32_t firstInstance,
-                                       bool isSuccess) {
-            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-            wgpu::RenderPassEncoder renderPassEncoder =
-                encoder.BeginRenderPass(GetBasicRenderPassDescriptor());
-            renderPassEncoder.SetPipeline(pipeline);
+        descriptor.cTargets[0].format = renderPass.colorFormat;
 
-            renderPassEncoder.SetIndexBuffer(indexBuffer.buffer, indexBuffer.indexFormat,
-                                             indexBuffer.offset, indexBuffer.size);
+        return device.CreateRenderPipeline(&descriptor);
+    }
 
-            for (auto vertexBufferParam : vertexBufferList) {
-                renderPassEncoder.SetVertexBuffer(vertexBufferParam.slot, vertexBufferParam.buffer,
-                                                  vertexBufferParam.offset, vertexBufferParam.size);
-            }
-            renderPassEncoder.DrawIndexed(indexCount, instanceCount, firstIndex, baseVertex,
-                                          firstInstance);
-            renderPassEncoder.End();
+    // Create a render pipeline using only one vertex-step-mode Float32x4 buffer
+    wgpu::RenderPipeline CreateBasicRenderPipeline(uint32_t bufferStride = kFloat32x4Stride) {
+        DAWN_ASSERT(bufferStride >= kFloat32x4Stride);
 
-            if (isSuccess) {
-                encoder.Finish();
-            } else {
-                ASSERT_DEVICE_ERROR(encoder.Finish());
-            }
+        std::vector<PipelineVertexBufferDesc> bufferDescList = {
+            {bufferStride, wgpu::VertexStepMode::Vertex, {{0, wgpu::VertexFormat::Float32x4}}},
+        };
+
+        return CreateRenderPipelineWithBufferDesc(bufferDescList);
+    }
+
+    // Create a render pipeline using one vertex-step-mode Float32x4 buffer and one
+    // instance-step-mode Float32x2 buffer
+    wgpu::RenderPipeline CreateBasicRenderPipelineWithInstance(
+        uint32_t bufferStride1 = kFloat32x4Stride,
+        uint32_t bufferStride2 = kFloat32x2Stride) {
+        DAWN_ASSERT(bufferStride1 >= kFloat32x4Stride);
+        DAWN_ASSERT(bufferStride2 >= kFloat32x2Stride);
+
+        std::vector<PipelineVertexBufferDesc> bufferDescList = {
+            {bufferStride1, wgpu::VertexStepMode::Vertex, {{0, wgpu::VertexFormat::Float32x4}}},
+            {bufferStride2, wgpu::VertexStepMode::Instance, {{3, wgpu::VertexFormat::Float32x2}}},
+        };
+
+        return CreateRenderPipelineWithBufferDesc(bufferDescList);
+    }
+
+    // Create a render pipeline using one vertex-step-mode and one instance-step-mode buffer,
+    // both with a zero array stride. The minimal size of vertex step mode buffer should be 28,
+    // and the minimal size of instance step mode buffer should be 20.
+    wgpu::RenderPipeline CreateBasicRenderPipelineWithZeroArrayStride() {
+        std::vector<PipelineVertexBufferDesc> bufferDescList = {
+            {0,
+             wgpu::VertexStepMode::Vertex,
+             {{0, wgpu::VertexFormat::Float32x4, 0}, {1, wgpu::VertexFormat::Float32x2, 20}}},
+            {0,
+             wgpu::VertexStepMode::Instance,
+             // Two attributes are overlapped within this instance step mode vertex buffer
+             {{3, wgpu::VertexFormat::Float32x4, 4}, {7, wgpu::VertexFormat::Float32x3, 0}}},
+        };
+
+        return CreateRenderPipelineWithBufferDesc(bufferDescList);
+    }
+
+    void TestRenderPassDraw(const wgpu::RenderPipeline& pipeline,
+                            VertexBufferList vertexBufferList,
+                            uint32_t vertexCount,
+                            uint32_t instanceCount,
+                            uint32_t firstVertex,
+                            uint32_t firstInstance,
+                            bool isSuccess) {
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::RenderPassEncoder renderPassEncoder =
+            encoder.BeginRenderPass(GetBasicRenderPassDescriptor());
+        renderPassEncoder.SetPipeline(pipeline);
+
+        for (auto vertexBufferParam : vertexBufferList) {
+            renderPassEncoder.SetVertexBuffer(vertexBufferParam.slot, vertexBufferParam.buffer,
+                                              vertexBufferParam.offset, vertexBufferParam.size);
         }
+        renderPassEncoder.Draw(vertexCount, instanceCount, firstVertex, firstInstance);
+        renderPassEncoder.End();
 
-        // Parameters list for index buffer. Should cover all IndexFormat, and the zero/non-zero
-        // offset and size case in SetIndexBuffer
-        const std::vector<IndexBufferParams> kIndexParamsList = {
-            {wgpu::IndexFormat::Uint32, 12 * sizeof(uint32_t), 0, wgpu::kWholeSize, 12},
-            {wgpu::IndexFormat::Uint32, 13 * sizeof(uint32_t), sizeof(uint32_t), wgpu::kWholeSize,
-             12},
-            {wgpu::IndexFormat::Uint32, 13 * sizeof(uint32_t), 0, 12 * sizeof(uint32_t), 12},
-            {wgpu::IndexFormat::Uint32, 14 * sizeof(uint32_t), sizeof(uint32_t),
-             12 * sizeof(uint32_t), 12},
+        if (isSuccess) {
+            encoder.Finish();
+        } else {
+            ASSERT_DEVICE_ERROR(encoder.Finish());
+        }
+    }
 
-            {wgpu::IndexFormat::Uint16, 12 * sizeof(uint16_t), 0, wgpu::kWholeSize, 12},
-            {wgpu::IndexFormat::Uint16, 13 * sizeof(uint16_t), sizeof(uint16_t), wgpu::kWholeSize,
-             12},
-            {wgpu::IndexFormat::Uint16, 13 * sizeof(uint16_t), 0, 12 * sizeof(uint16_t), 12},
-            {wgpu::IndexFormat::Uint16, 14 * sizeof(uint16_t), sizeof(uint16_t),
-             12 * sizeof(uint16_t), 12},
-        };
-        // Parameters list for vertex-step-mode buffer. These parameters should cover different
-        // stride, buffer size, SetVertexBuffer size and offset.
-        const std::vector<VertexBufferParams> kVertexParamsList = {
-            // For stride = kFloat32x4Stride
-            {kFloat32x4Stride, 3 * kFloat32x4Stride, 0, wgpu::kWholeSize, 3},
-            // Non-zero offset
-            {kFloat32x4Stride, 4 * kFloat32x4Stride, kFloat32x4Stride, wgpu::kWholeSize, 3},
-            // Non-default size
-            {kFloat32x4Stride, 4 * kFloat32x4Stride, 0, 3 * kFloat32x4Stride, 3},
-            // Non-zero offset and size
-            {kFloat32x4Stride, 5 * kFloat32x4Stride, kFloat32x4Stride, 3 * kFloat32x4Stride, 3},
-            // For stride = 2 * kFloat32x4Stride
-            {(2 * kFloat32x4Stride), 3 * (2 * kFloat32x4Stride), 0, wgpu::kWholeSize, 3},
-            // Non-zero offset
-            {(2 * kFloat32x4Stride), 4 * (2 * kFloat32x4Stride), (2 * kFloat32x4Stride),
-             wgpu::kWholeSize, 3},
-            // Non-default size
-            {(2 * kFloat32x4Stride), 4 * (2 * kFloat32x4Stride), 0, 3 * (2 * kFloat32x4Stride), 3},
-            // Non-zero offset and size
-            {(2 * kFloat32x4Stride), 5 * (2 * kFloat32x4Stride), (2 * kFloat32x4Stride),
-             3 * (2 * kFloat32x4Stride), 3},
-        };
-        // Parameters list for instance-step-mode buffer.
-        const std::vector<VertexBufferParams> kInstanceParamsList = {
-            // For stride = kFloat32x2Stride
-            {kFloat32x2Stride, 5 * kFloat32x2Stride, 0, wgpu::kWholeSize, 5},
-            // Non-zero offset
-            {kFloat32x2Stride, 6 * kFloat32x2Stride, kFloat32x2Stride, wgpu::kWholeSize, 5},
-            // Non-default size
-            {kFloat32x2Stride, 6 * kFloat32x2Stride, 0, 5 * kFloat32x2Stride, 5},
-            // Non-zero offset and size
-            {kFloat32x2Stride, 7 * kFloat32x2Stride, kFloat32x2Stride, 5 * kFloat32x2Stride, 5},
-            // For stride = 3 * kFloat32x2Stride
-            {(3 * kFloat32x2Stride), 5 * (3 * kFloat32x2Stride), 0, wgpu::kWholeSize, 5},
-            // Non-zero offset
-            {(3 * kFloat32x2Stride), 6 * (3 * kFloat32x2Stride), (3 * kFloat32x2Stride),
-             wgpu::kWholeSize, 5},
-            // Non-default size
-            {(3 * kFloat32x2Stride), 6 * (3 * kFloat32x2Stride), 0, 5 * (3 * kFloat32x2Stride), 5},
-            // Non-zero offset and size
-            {(3 * kFloat32x2Stride), 7 * (3 * kFloat32x2Stride), (3 * kFloat32x2Stride),
-             5 * (3 * kFloat32x2Stride), 5},
-        };
+    void TestRenderPassDrawIndexed(const wgpu::RenderPipeline& pipeline,
+                                   IndexBufferDesc indexBuffer,
+                                   VertexBufferList vertexBufferList,
+                                   uint32_t indexCount,
+                                   uint32_t instanceCount,
+                                   uint32_t firstIndex,
+                                   int32_t baseVertex,
+                                   uint32_t firstInstance,
+                                   bool isSuccess) {
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::RenderPassEncoder renderPassEncoder =
+            encoder.BeginRenderPass(GetBasicRenderPassDescriptor());
+        renderPassEncoder.SetPipeline(pipeline);
 
-      private:
-        wgpu::ShaderModule fsModule;
-        utils::BasicRenderPass renderPass;
+        renderPassEncoder.SetIndexBuffer(indexBuffer.buffer, indexBuffer.indexFormat,
+                                         indexBuffer.offset, indexBuffer.size);
+
+        for (auto vertexBufferParam : vertexBufferList) {
+            renderPassEncoder.SetVertexBuffer(vertexBufferParam.slot, vertexBufferParam.buffer,
+                                              vertexBufferParam.offset, vertexBufferParam.size);
+        }
+        renderPassEncoder.DrawIndexed(indexCount, instanceCount, firstIndex, baseVertex,
+                                      firstInstance);
+        renderPassEncoder.End();
+
+        if (isSuccess) {
+            encoder.Finish();
+        } else {
+            ASSERT_DEVICE_ERROR(encoder.Finish());
+        }
+    }
+
+    // Parameters list for index buffer. Should cover all IndexFormat, and the zero/non-zero
+    // offset and size case in SetIndexBuffer
+    const std::vector<IndexBufferParams> kIndexParamsList = {
+        {wgpu::IndexFormat::Uint32, 12 * sizeof(uint32_t), 0, wgpu::kWholeSize, 12},
+        {wgpu::IndexFormat::Uint32, 13 * sizeof(uint32_t), sizeof(uint32_t), wgpu::kWholeSize, 12},
+        {wgpu::IndexFormat::Uint32, 13 * sizeof(uint32_t), 0, 12 * sizeof(uint32_t), 12},
+        {wgpu::IndexFormat::Uint32, 14 * sizeof(uint32_t), sizeof(uint32_t), 12 * sizeof(uint32_t),
+         12},
+
+        {wgpu::IndexFormat::Uint16, 12 * sizeof(uint16_t), 0, wgpu::kWholeSize, 12},
+        {wgpu::IndexFormat::Uint16, 13 * sizeof(uint16_t), sizeof(uint16_t), wgpu::kWholeSize, 12},
+        {wgpu::IndexFormat::Uint16, 13 * sizeof(uint16_t), 0, 12 * sizeof(uint16_t), 12},
+        {wgpu::IndexFormat::Uint16, 14 * sizeof(uint16_t), sizeof(uint16_t), 12 * sizeof(uint16_t),
+         12},
+    };
+    // Parameters list for vertex-step-mode buffer. These parameters should cover different
+    // stride, buffer size, SetVertexBuffer size and offset.
+    const std::vector<VertexBufferParams> kVertexParamsList = {
+        // For stride = kFloat32x4Stride
+        {kFloat32x4Stride, 3 * kFloat32x4Stride, 0, wgpu::kWholeSize, 3},
+        // Non-zero offset
+        {kFloat32x4Stride, 4 * kFloat32x4Stride, kFloat32x4Stride, wgpu::kWholeSize, 3},
+        // Non-default size
+        {kFloat32x4Stride, 4 * kFloat32x4Stride, 0, 3 * kFloat32x4Stride, 3},
+        // Non-zero offset and size
+        {kFloat32x4Stride, 5 * kFloat32x4Stride, kFloat32x4Stride, 3 * kFloat32x4Stride, 3},
+        // For stride = 2 * kFloat32x4Stride
+        {(2 * kFloat32x4Stride), 3 * (2 * kFloat32x4Stride), 0, wgpu::kWholeSize, 3},
+        // Non-zero offset
+        {(2 * kFloat32x4Stride), 4 * (2 * kFloat32x4Stride), (2 * kFloat32x4Stride),
+         wgpu::kWholeSize, 3},
+        // Non-default size
+        {(2 * kFloat32x4Stride), 4 * (2 * kFloat32x4Stride), 0, 3 * (2 * kFloat32x4Stride), 3},
+        // Non-zero offset and size
+        {(2 * kFloat32x4Stride), 5 * (2 * kFloat32x4Stride), (2 * kFloat32x4Stride),
+         3 * (2 * kFloat32x4Stride), 3},
+    };
+    // Parameters list for instance-step-mode buffer.
+    const std::vector<VertexBufferParams> kInstanceParamsList = {
+        // For stride = kFloat32x2Stride
+        {kFloat32x2Stride, 5 * kFloat32x2Stride, 0, wgpu::kWholeSize, 5},
+        // Non-zero offset
+        {kFloat32x2Stride, 6 * kFloat32x2Stride, kFloat32x2Stride, wgpu::kWholeSize, 5},
+        // Non-default size
+        {kFloat32x2Stride, 6 * kFloat32x2Stride, 0, 5 * kFloat32x2Stride, 5},
+        // Non-zero offset and size
+        {kFloat32x2Stride, 7 * kFloat32x2Stride, kFloat32x2Stride, 5 * kFloat32x2Stride, 5},
+        // For stride = 3 * kFloat32x2Stride
+        {(3 * kFloat32x2Stride), 5 * (3 * kFloat32x2Stride), 0, wgpu::kWholeSize, 5},
+        // Non-zero offset
+        {(3 * kFloat32x2Stride), 6 * (3 * kFloat32x2Stride), (3 * kFloat32x2Stride),
+         wgpu::kWholeSize, 5},
+        // Non-default size
+        {(3 * kFloat32x2Stride), 6 * (3 * kFloat32x2Stride), 0, 5 * (3 * kFloat32x2Stride), 5},
+        // Non-zero offset and size
+        {(3 * kFloat32x2Stride), 7 * (3 * kFloat32x2Stride), (3 * kFloat32x2Stride),
+         5 * (3 * kFloat32x2Stride), 5},
     };
 
-    // Control case for Draw
-    TEST_F(DrawVertexAndIndexBufferOOBValidationTests, DrawBasic) {
-        wgpu::RenderPipeline pipeline = CreateBasicRenderPipeline();
+  private:
+    wgpu::ShaderModule fsModule;
+    utils::BasicRenderPass renderPass;
+};
 
-        wgpu::Buffer vertexBuffer = CreateBuffer(3 * kFloat32x4Stride);
+// Control case for Draw
+TEST_F(DrawVertexAndIndexBufferOOBValidationTests, DrawBasic) {
+    wgpu::RenderPipeline pipeline = CreateBasicRenderPipeline();
 
-        {
-            // Implicit size
-            VertexBufferList vertexBufferList = {{0, vertexBuffer, 0, wgpu::kWholeSize}};
-            TestRenderPassDraw(pipeline, vertexBufferList, 3, 1, 0, 0, true);
-        }
+    wgpu::Buffer vertexBuffer = CreateBuffer(3 * kFloat32x4Stride);
 
-        {
-            // Explicit zero size
-            VertexBufferList vertexBufferList = {{0, vertexBuffer, 0, 0}};
-            TestRenderPassDraw(pipeline, vertexBufferList, 3, 1, 0, 0, false);
-        }
+    {
+        // Implicit size
+        VertexBufferList vertexBufferList = {{0, vertexBuffer, 0, wgpu::kWholeSize}};
+        TestRenderPassDraw(pipeline, vertexBufferList, 3, 1, 0, 0, true);
     }
 
-    // Verify vertex buffer OOB for non-instanced Draw are caught in command encoder
-    TEST_F(DrawVertexAndIndexBufferOOBValidationTests, DrawVertexBufferOutOfBoundWithoutInstance) {
-        for (VertexBufferParams params : kVertexParamsList) {
-            // Create a render pipeline without instance step mode buffer
-            wgpu::RenderPipeline pipeline = CreateBasicRenderPipeline(params.bufferStride);
+    {
+        // Explicit zero size
+        VertexBufferList vertexBufferList = {{0, vertexBuffer, 0, 0}};
+        TestRenderPassDraw(pipeline, vertexBufferList, 3, 1, 0, 0, false);
+    }
+}
 
-            // Build vertex buffer for 3 vertices
-            wgpu::Buffer vertexBuffer = CreateBuffer(params.bufferSize);
+// Verify vertex buffer OOB for non-instanced Draw are caught in command encoder
+TEST_F(DrawVertexAndIndexBufferOOBValidationTests, DrawVertexBufferOutOfBoundWithoutInstance) {
+    for (VertexBufferParams params : kVertexParamsList) {
+        // Create a render pipeline without instance step mode buffer
+        wgpu::RenderPipeline pipeline = CreateBasicRenderPipeline(params.bufferStride);
+
+        // Build vertex buffer for 3 vertices
+        wgpu::Buffer vertexBuffer = CreateBuffer(params.bufferSize);
+        VertexBufferList vertexBufferList = {
+            {0, vertexBuffer, params.bufferOffsetForEncoder, params.bufferSizeForEncoder}};
+
+        uint32_t n = params.maxValidAccessNumber;
+        // It is ok to draw n vertices with vertex buffer
+        TestRenderPassDraw(pipeline, vertexBufferList, n, 1, 0, 0, true);
+        // It is ok to draw n-1 vertices with offset 1
+        TestRenderPassDraw(pipeline, vertexBufferList, n - 1, 1, 1, 0, true);
+        // Drawing more vertices will cause OOB, even if not enough for another primitive
+        TestRenderPassDraw(pipeline, vertexBufferList, n + 1, 1, 0, 0, false);
+        // Drawing n vertices with non-zero offset will cause OOB
+        TestRenderPassDraw(pipeline, vertexBufferList, n, 1, 1, 0, false);
+        // It is ok to draw any number of instances, as we have no instance-mode buffer
+        TestRenderPassDraw(pipeline, vertexBufferList, n, 5, 0, 0, true);
+        TestRenderPassDraw(pipeline, vertexBufferList, n, 5, 0, 5, true);
+    }
+}
+
+// Verify vertex buffer OOB for instanced Draw are caught in command encoder
+TEST_F(DrawVertexAndIndexBufferOOBValidationTests, DrawVertexBufferOutOfBoundWithInstance) {
+    for (VertexBufferParams vertexParams : kVertexParamsList) {
+        for (VertexBufferParams instanceParams : kInstanceParamsList) {
+            // Create pipeline with given buffer stride
+            wgpu::RenderPipeline pipeline = CreateBasicRenderPipelineWithInstance(
+                vertexParams.bufferStride, instanceParams.bufferStride);
+
+            // Build vertex buffer
+            wgpu::Buffer vertexBuffer = CreateBuffer(vertexParams.bufferSize);
+            wgpu::Buffer instanceBuffer = CreateBuffer(instanceParams.bufferSize);
+
             VertexBufferList vertexBufferList = {
-                {0, vertexBuffer, params.bufferOffsetForEncoder, params.bufferSizeForEncoder}};
+                {0, vertexBuffer, vertexParams.bufferOffsetForEncoder,
+                 vertexParams.bufferSizeForEncoder},
+                {1, instanceBuffer, instanceParams.bufferOffsetForEncoder,
+                 instanceParams.bufferSizeForEncoder},
+            };
 
-            uint32_t n = params.maxValidAccessNumber;
-            // It is ok to draw n vertices with vertex buffer
-            TestRenderPassDraw(pipeline, vertexBufferList, n, 1, 0, 0, true);
-            // It is ok to draw n-1 vertices with offset 1
-            TestRenderPassDraw(pipeline, vertexBufferList, n - 1, 1, 1, 0, true);
-            // Drawing more vertices will cause OOB, even if not enough for another primitive
-            TestRenderPassDraw(pipeline, vertexBufferList, n + 1, 1, 0, 0, false);
-            // Drawing n vertices will non-zero offset will cause OOB
-            TestRenderPassDraw(pipeline, vertexBufferList, n, 1, 1, 0, false);
-            // It is ok to draw any number of instances, as we have no instance-mode buffer
-            TestRenderPassDraw(pipeline, vertexBufferList, n, 5, 0, 0, true);
-            TestRenderPassDraw(pipeline, vertexBufferList, n, 5, 0, 5, true);
+            uint32_t vert = vertexParams.maxValidAccessNumber;
+            uint32_t inst = instanceParams.maxValidAccessNumber;
+            // It is ok to draw vert vertices
+            TestRenderPassDraw(pipeline, vertexBufferList, vert, 1, 0, 0, true);
+            TestRenderPassDraw(pipeline, vertexBufferList, vert - 1, 1, 1, 0, true);
+            // It is ok to draw vert vertices and inst instances
+            TestRenderPassDraw(pipeline, vertexBufferList, vert, inst, 0, 0, true);
+            TestRenderPassDraw(pipeline, vertexBufferList, vert, inst - 1, 0, 1, true);
+            // more vertices causing OOB
+            TestRenderPassDraw(pipeline, vertexBufferList, vert + 1, 1, 0, 0, false);
+            TestRenderPassDraw(pipeline, vertexBufferList, vert, 1, 1, 0, false);
+            TestRenderPassDraw(pipeline, vertexBufferList, vert + 1, inst, 0, 0, false);
+            TestRenderPassDraw(pipeline, vertexBufferList, vert, inst, 1, 0, false);
+            // more instances causing OOB
+            TestRenderPassDraw(pipeline, vertexBufferList, vert, inst + 1, 0, 0, false);
+            TestRenderPassDraw(pipeline, vertexBufferList, vert, inst, 0, 1, false);
+            // Both OOB
+            TestRenderPassDraw(pipeline, vertexBufferList, vert, inst + 1, 0, 0, false);
+            TestRenderPassDraw(pipeline, vertexBufferList, vert, inst, 1, 1, false);
         }
     }
+}
 
-    // Verify vertex buffer OOB for instanced Draw are caught in command encoder
-    TEST_F(DrawVertexAndIndexBufferOOBValidationTests, DrawVertexBufferOutOfBoundWithInstance) {
-        for (VertexBufferParams vertexParams : kVertexParamsList) {
-            for (VertexBufferParams instanceParams : kInstanceParamsList) {
-                // Create pipeline with given buffer stride
-                wgpu::RenderPipeline pipeline = CreateBasicRenderPipelineWithInstance(
-                    vertexParams.bufferStride, instanceParams.bufferStride);
+// Control case for DrawIndexed
+TEST_F(DrawVertexAndIndexBufferOOBValidationTests, DrawIndexedBasic) {
+    wgpu::RenderPipeline pipeline = CreateBasicRenderPipeline();
 
-                // Build vertex buffer
-                wgpu::Buffer vertexBuffer = CreateBuffer(vertexParams.bufferSize);
-                wgpu::Buffer instanceBuffer = CreateBuffer(instanceParams.bufferSize);
+    // Build index buffer for 12 indexes
+    wgpu::Buffer indexBuffer = CreateBuffer(12 * sizeof(uint32_t), wgpu::BufferUsage::Index);
 
-                VertexBufferList vertexBufferList = {
-                    {0, vertexBuffer, vertexParams.bufferOffsetForEncoder,
-                     vertexParams.bufferSizeForEncoder},
-                    {1, instanceBuffer, instanceParams.bufferOffsetForEncoder,
-                     instanceParams.bufferSizeForEncoder},
-                };
+    // Build vertex buffer for 3 vertices
+    wgpu::Buffer vertexBuffer = CreateBuffer(3 * kFloat32x4Stride);
+    VertexBufferList vertexBufferList = {{0, vertexBuffer, 0, wgpu::kWholeSize}};
 
-                uint32_t vert = vertexParams.maxValidAccessNumber;
-                uint32_t inst = instanceParams.maxValidAccessNumber;
-                // It is ok to draw vert vertices
-                TestRenderPassDraw(pipeline, vertexBufferList, vert, 1, 0, 0, true);
-                TestRenderPassDraw(pipeline, vertexBufferList, vert - 1, 1, 1, 0, true);
-                // It is ok to draw vert vertices and inst instences
-                TestRenderPassDraw(pipeline, vertexBufferList, vert, inst, 0, 0, true);
-                TestRenderPassDraw(pipeline, vertexBufferList, vert, inst - 1, 0, 1, true);
-                // more vertices causing OOB
-                TestRenderPassDraw(pipeline, vertexBufferList, vert + 1, 1, 0, 0, false);
-                TestRenderPassDraw(pipeline, vertexBufferList, vert, 1, 1, 0, false);
-                TestRenderPassDraw(pipeline, vertexBufferList, vert + 1, inst, 0, 0, false);
-                TestRenderPassDraw(pipeline, vertexBufferList, vert, inst, 1, 0, false);
-                // more instances causing OOB
-                TestRenderPassDraw(pipeline, vertexBufferList, vert, inst + 1, 0, 0, false);
-                TestRenderPassDraw(pipeline, vertexBufferList, vert, inst, 0, 1, false);
-                // Both OOB
-                TestRenderPassDraw(pipeline, vertexBufferList, vert, inst + 1, 0, 0, false);
-                TestRenderPassDraw(pipeline, vertexBufferList, vert, inst, 1, 1, false);
+    IndexBufferDesc indexBufferDesc = {indexBuffer, wgpu::IndexFormat::Uint32};
+
+    TestRenderPassDrawIndexed(pipeline, indexBufferDesc, vertexBufferList, 12, 1, 0, 0, 0, true);
+}
+
+// Verify index buffer OOB for DrawIndexed are caught in command encoder
+TEST_F(DrawVertexAndIndexBufferOOBValidationTests, DrawIndexedIndexBufferOOB) {
+    wgpu::RenderPipeline pipeline = CreateBasicRenderPipelineWithInstance();
+
+    for (IndexBufferParams params : kIndexParamsList) {
+        // Build index buffer using the given params
+        wgpu::Buffer indexBuffer = CreateBuffer(params.indexBufferSize, wgpu::BufferUsage::Index);
+        // Build vertex buffer for 3 vertices
+        wgpu::Buffer vertexBuffer = CreateBuffer(3 * kFloat32x4Stride);
+        // Build vertex buffer for 5 instances
+        wgpu::Buffer instanceBuffer = CreateBuffer(5 * kFloat32x2Stride);
+
+        VertexBufferList vertexBufferList = {{0, vertexBuffer, 0, wgpu::kWholeSize},
+                                             {1, instanceBuffer, 0, wgpu::kWholeSize}};
+
+        IndexBufferDesc indexBufferDesc = {indexBuffer, params.indexFormat,
+                                           params.indexBufferOffsetForEncoder,
+                                           params.indexBufferSizeForEncoder};
+
+        uint32_t n = params.maxValidIndexNumber;
+
+        // Control case
+        TestRenderPassDrawIndexed(pipeline, indexBufferDesc, vertexBufferList, n, 5, 0, 0, 0, true);
+        TestRenderPassDrawIndexed(pipeline, indexBufferDesc, vertexBufferList, n - 1, 5, 1, 0, 0,
+                                  true);
+        // Index buffer OOB, indexCount too large
+        TestRenderPassDrawIndexed(pipeline, indexBufferDesc, vertexBufferList, n + 1, 5, 0, 0, 0,
+                                  false);
+        // Index buffer OOB, indexCount + firstIndex too large
+        TestRenderPassDrawIndexed(pipeline, indexBufferDesc, vertexBufferList, n, 5, 1, 0, 0,
+                                  false);
+
+        if (!HasToggleEnabled("disable_base_vertex")) {
+            // baseVertex is not considered in CPU validation and has no effect on validation
+            // Although baseVertex is too large, it will still pass
+            TestRenderPassDrawIndexed(pipeline, indexBufferDesc, vertexBufferList, n, 5, 0, 100, 0,
+                                      true);
+            // Index buffer OOB, indexCount too large
+            TestRenderPassDrawIndexed(pipeline, indexBufferDesc, vertexBufferList, n + 1, 5, 0, 100,
+                                      0, false);
+        }
+    }
+}
+
+// Verify instance mode vertex buffer OOB for DrawIndexed are caught in command encoder
+TEST_F(DrawVertexAndIndexBufferOOBValidationTests, DrawIndexedVertexBufferOOB) {
+    for (VertexBufferParams vertexParams : kVertexParamsList) {
+        for (VertexBufferParams instanceParams : kInstanceParamsList) {
+            // Create pipeline with given buffer stride
+            wgpu::RenderPipeline pipeline = CreateBasicRenderPipelineWithInstance(
+                vertexParams.bufferStride, instanceParams.bufferStride);
+
+            auto indexFormat = wgpu::IndexFormat::Uint32;
+            auto indexStride = sizeof(uint32_t);
+
+            // Build index buffer for 12 indexes
+            wgpu::Buffer indexBuffer = CreateBuffer(12 * indexStride, wgpu::BufferUsage::Index);
+            // Build vertex buffer for vertices
+            wgpu::Buffer vertexBuffer = CreateBuffer(vertexParams.bufferSize);
+            // Build vertex buffer for instances
+            wgpu::Buffer instanceBuffer = CreateBuffer(instanceParams.bufferSize);
+
+            VertexBufferList vertexBufferList = {
+                {0, vertexBuffer, vertexParams.bufferOffsetForEncoder,
+                 vertexParams.bufferSizeForEncoder},
+                {1, instanceBuffer, instanceParams.bufferOffsetForEncoder,
+                 instanceParams.bufferSizeForEncoder}};
+
+            IndexBufferDesc indexBufferDesc = {indexBuffer, indexFormat};
+
+            uint32_t inst = instanceParams.maxValidAccessNumber;
+            // Control case
+            TestRenderPassDrawIndexed(pipeline, indexBufferDesc, vertexBufferList, 12, inst, 0, 0,
+                                      0, true);
+            // Vertex buffer (stepMode = instance) OOB, instanceCount too large
+            TestRenderPassDrawIndexed(pipeline, indexBufferDesc, vertexBufferList, 12, inst + 1, 0,
+                                      0, 0, false);
+
+            if (!HasToggleEnabled("disable_base_instance")) {
+                // firstInstance is considered in CPU validation
+                // Vertex buffer (stepMode = instance) in bound
+                TestRenderPassDrawIndexed(pipeline, indexBufferDesc, vertexBufferList, 12, inst - 1,
+                                          0, 0, 1, true);
+                // Vertex buffer (stepMode = instance) OOB, instanceCount + firstInstance too
+                // large
+                TestRenderPassDrawIndexed(pipeline, indexBufferDesc, vertexBufferList, 12, inst, 0,
+                                          0, 1, false);
             }
         }
     }
+}
 
-    // Control case for DrawIndexed
-    TEST_F(DrawVertexAndIndexBufferOOBValidationTests, DrawIndexedBasic) {
-        wgpu::RenderPipeline pipeline = CreateBasicRenderPipeline();
+// Verify zero-array-stride vertex buffer OOB for Draw/DrawIndexed is caught in command encoder
+TEST_F(DrawVertexAndIndexBufferOOBValidationTests, ZeroArrayStrideVertexBufferOOB) {
+    // In this test, we use VertexBufferParams.maxValidAccessNumber > 0 to indicate that such
+    // a buffer parameter meets the pipeline's requirements, and maxValidAccessNumber == 0 to
+    // indicate that such a buffer parameter will cause OOB.
+    const std::vector<VertexBufferParams> kVertexParamsListForZeroStride = {
+        // Control case
+        {0, 28, 0, wgpu::kWholeSize, 1},
+        // Non-zero offset
+        {0, 28, 4, wgpu::kWholeSize, 0},
+        {0, 28, 28, wgpu::kWholeSize, 0},
+        // Non-default size
+        {0, 28, 0, 28, 1},
+        {0, 28, 0, 27, 0},
+        // Non-zero offset and size
+        {0, 32, 4, 28, 1},
+        {0, 31, 4, 27, 0},
+        {0, 31, 4, wgpu::kWholeSize, 0},
+    };
 
-        // Build index buffer for 12 indexes
-        wgpu::Buffer indexBuffer = CreateBuffer(12 * sizeof(uint32_t), wgpu::BufferUsage::Index);
+    const std::vector<VertexBufferParams> kInstanceParamsListForZeroStride = {
+        // Control case
+        {0, 20, 0, wgpu::kWholeSize, 1},
+        // Non-zero offset
+        {0, 24, 4, wgpu::kWholeSize, 1},
+        {0, 23, 4, wgpu::kWholeSize, 0},
+        {0, 20, 4, wgpu::kWholeSize, 0},
+        {0, 20, 20, wgpu::kWholeSize, 0},
+        // Non-default size
+        {0, 21, 0, 20, 1},
+        {0, 20, 0, 19, 0},
+        // Non-zero offset and size
+        {0, 30, 4, 20, 1},
+        {0, 30, 4, 19, 0},
+    };
 
-        // Build vertex buffer for 3 vertices
-        wgpu::Buffer vertexBuffer = CreateBuffer(3 * kFloat32x4Stride);
-        VertexBufferList vertexBufferList = {{0, vertexBuffer, 0, wgpu::kWholeSize}};
+    // Build a pipeline that require a vertex step mode vertex buffer no smaller than 28 bytes
+    // and an instance step mode buffer no smaller than 20 bytes
+    wgpu::RenderPipeline pipeline = CreateBasicRenderPipelineWithZeroArrayStride();
 
-        IndexBufferDesc indexBufferDesc = {indexBuffer, wgpu::IndexFormat::Uint32};
+    for (VertexBufferParams vertexParams : kVertexParamsListForZeroStride) {
+        for (VertexBufferParams instanceParams : kInstanceParamsListForZeroStride) {
+            auto indexFormat = wgpu::IndexFormat::Uint32;
+            auto indexStride = sizeof(uint32_t);
 
-        TestRenderPassDrawIndexed(pipeline, indexBufferDesc, vertexBufferList, 12, 1, 0, 0, 0,
-                                  true);
+            // Build index buffer for 12 indexes
+            wgpu::Buffer indexBuffer = CreateBuffer(12 * indexStride, wgpu::BufferUsage::Index);
+            // Build vertex buffer for vertices
+            wgpu::Buffer vertexBuffer = CreateBuffer(vertexParams.bufferSize);
+            // Build vertex buffer for instances
+            wgpu::Buffer instanceBuffer = CreateBuffer(instanceParams.bufferSize);
+
+            VertexBufferList vertexBufferList = {
+                {0, vertexBuffer, vertexParams.bufferOffsetForEncoder,
+                 vertexParams.bufferSizeForEncoder},
+                {1, instanceBuffer, instanceParams.bufferOffsetForEncoder,
+                 instanceParams.bufferSizeForEncoder}};
+
+            IndexBufferDesc indexBufferDesc = {indexBuffer, indexFormat};
+
+            const bool isSuccess = (vertexParams.maxValidAccessNumber > 0) &&
+                                   (instanceParams.maxValidAccessNumber > 0);
+            // vertexCount and instanceCount don't matter, as the array stride is zero and all
+            // vertices/instances access the same region of the buffer
+            TestRenderPassDraw(pipeline, vertexBufferList, 100, 100, 0, 0, isSuccess);
+            // indexCount doesn't matter as long as no index buffer OOB happened
+            TestRenderPassDrawIndexed(pipeline, indexBufferDesc, vertexBufferList, 12, 100, 0, 0, 0,
+                                      isSuccess);
+        }
     }
+}
 
-    // Verify index buffer OOB for DrawIndexed are caught in command encoder
-    TEST_F(DrawVertexAndIndexBufferOOBValidationTests, DrawIndexedIndexBufferOOB) {
+// Verify that if setVertexBuffer and/or setIndexBuffer for multiple times, only the last one is
+// taken into account
+TEST_F(DrawVertexAndIndexBufferOOBValidationTests, SetBufferMultipleTime) {
+    wgpu::IndexFormat indexFormat = wgpu::IndexFormat::Uint32;
+    uint32_t indexStride = sizeof(uint32_t);
+
+    // Build index buffer for 11 indexes
+    wgpu::Buffer indexBuffer11 = CreateBuffer(11 * indexStride, wgpu::BufferUsage::Index);
+    // Build index buffer for 12 indexes
+    wgpu::Buffer indexBuffer12 = CreateBuffer(12 * indexStride, wgpu::BufferUsage::Index);
+    // Build vertex buffer for 2 vertices
+    wgpu::Buffer vertexBuffer2 = CreateBuffer(2 * kFloat32x4Stride);
+    // Build vertex buffer for 3 vertices
+    wgpu::Buffer vertexBuffer3 = CreateBuffer(3 * kFloat32x4Stride);
+    // Build vertex buffer for 4 instances
+    wgpu::Buffer instanceBuffer4 = CreateBuffer(4 * kFloat32x2Stride);
+    // Build vertex buffer for 5 instances
+    wgpu::Buffer instanceBuffer5 = CreateBuffer(5 * kFloat32x2Stride);
+
+    // Test for setting vertex buffer for multiple times
+    {
         wgpu::RenderPipeline pipeline = CreateBasicRenderPipelineWithInstance();
 
-        for (IndexBufferParams params : kIndexParamsList) {
-            // Build index buffer use given params
-            wgpu::Buffer indexBuffer =
-                CreateBuffer(params.indexBufferSize, wgpu::BufferUsage::Index);
-            // Build vertex buffer for 3 vertices
-            wgpu::Buffer vertexBuffer = CreateBuffer(3 * kFloat32x4Stride);
-            // Build vertex buffer for 5 instances
-            wgpu::Buffer instanceBuffer = CreateBuffer(5 * kFloat32x2Stride);
+        // Set to vertexBuffer3 and instanceBuffer5 at last
+        VertexBufferList vertexBufferList = {{0, vertexBuffer2, 0, wgpu::kWholeSize},
+                                             {1, instanceBuffer4, 0, wgpu::kWholeSize},
+                                             {1, instanceBuffer5, 0, wgpu::kWholeSize},
+                                             {0, vertexBuffer3, 0, wgpu::kWholeSize}};
 
-            VertexBufferList vertexBufferList = {{0, vertexBuffer, 0, wgpu::kWholeSize},
-                                                 {1, instanceBuffer, 0, wgpu::kWholeSize}};
+        // For Draw, the max vertexCount is 3 and the max instanceCount is 5
+        TestRenderPassDraw(pipeline, vertexBufferList, 3, 5, 0, 0, true);
+        TestRenderPassDraw(pipeline, vertexBufferList, 4, 5, 0, 0, false);
+        TestRenderPassDraw(pipeline, vertexBufferList, 3, 6, 0, 0, false);
+        // For DrawIndex, the max instanceCount is 5
+        TestRenderPassDrawIndexed(pipeline, {indexBuffer12, indexFormat}, vertexBufferList, 12, 5,
+                                  0, 0, 0, true);
+        TestRenderPassDrawIndexed(pipeline, {indexBuffer12, indexFormat}, vertexBufferList, 12, 6,
+                                  0, 0, 0, false);
 
-            IndexBufferDesc indexBufferDesc = {indexBuffer, params.indexFormat,
-                                               params.indexBufferOffsetForEncoder,
-                                               params.indexBufferSizeForEncoder};
+        // Set to vertexBuffer2 and instanceBuffer4 at last
+        vertexBufferList = VertexBufferList{{0, vertexBuffer3, 0, wgpu::kWholeSize},
+                                            {1, instanceBuffer5, 0, wgpu::kWholeSize},
+                                            {0, vertexBuffer2, 0, wgpu::kWholeSize},
+                                            {1, instanceBuffer4, 0, wgpu::kWholeSize}};
 
-            uint32_t n = params.maxValidIndexNumber;
-
-            // Control case
-            TestRenderPassDrawIndexed(pipeline, indexBufferDesc, vertexBufferList, n, 5, 0, 0, 0,
-                                      true);
-            TestRenderPassDrawIndexed(pipeline, indexBufferDesc, vertexBufferList, n - 1, 5, 1, 0,
-                                      0, true);
-            // Index buffer OOB, indexCount too large
-            TestRenderPassDrawIndexed(pipeline, indexBufferDesc, vertexBufferList, n + 1, 5, 0, 0,
-                                      0, false);
-            // Index buffer OOB, indexCount + firstIndex too large
-            TestRenderPassDrawIndexed(pipeline, indexBufferDesc, vertexBufferList, n, 5, 1, 0, 0,
-                                      false);
-
-            if (!HasToggleEnabled("disable_base_vertex")) {
-                // baseVertex is not considered in CPU validation and has no effect on validation
-                // Although baseVertex is too large, it will still pass
-                TestRenderPassDrawIndexed(pipeline, indexBufferDesc, vertexBufferList, n, 5, 0, 100,
-                                          0, true);
-                // Index buffer OOB, indexCount too large
-                TestRenderPassDrawIndexed(pipeline, indexBufferDesc, vertexBufferList, n + 1, 5, 0,
-                                          100, 0, false);
-            }
-        }
+        // For Draw, the max vertexCount is 2 and the max instanceCount is 4
+        TestRenderPassDraw(pipeline, vertexBufferList, 2, 4, 0, 0, true);
+        TestRenderPassDraw(pipeline, vertexBufferList, 3, 4, 0, 0, false);
+        TestRenderPassDraw(pipeline, vertexBufferList, 2, 5, 0, 0, false);
+        // For DrawIndexed, the max instanceCount is 4
+        TestRenderPassDrawIndexed(pipeline, {indexBuffer12, indexFormat}, vertexBufferList, 12, 4,
+                                  0, 0, 0, true);
+        TestRenderPassDrawIndexed(pipeline, {indexBuffer12, indexFormat}, vertexBufferList, 12, 5,
+                                  0, 0, 0, false);
     }
 
-    // Verify instance mode vertex buffer OOB for DrawIndexed are caught in command encoder
-    TEST_F(DrawVertexAndIndexBufferOOBValidationTests, DrawIndexedVertexBufferOOB) {
-        for (VertexBufferParams vertexParams : kVertexParamsList) {
-            for (VertexBufferParams instanceParams : kInstanceParamsList) {
-                // Create pipeline with given buffer stride
-                wgpu::RenderPipeline pipeline = CreateBasicRenderPipelineWithInstance(
-                    vertexParams.bufferStride, instanceParams.bufferStride);
+    // Test for setIndexBuffer multiple times
+    {
+        wgpu::RenderPipeline pipeline = CreateBasicRenderPipeline();
 
-                auto indexFormat = wgpu::IndexFormat::Uint32;
-                auto indexStride = sizeof(uint32_t);
-
-                // Build index buffer for 12 indexes
-                wgpu::Buffer indexBuffer = CreateBuffer(12 * indexStride, wgpu::BufferUsage::Index);
-                // Build vertex buffer for vertices
-                wgpu::Buffer vertexBuffer = CreateBuffer(vertexParams.bufferSize);
-                // Build vertex buffer for instances
-                wgpu::Buffer instanceBuffer = CreateBuffer(instanceParams.bufferSize);
-
-                VertexBufferList vertexBufferList = {
-                    {0, vertexBuffer, vertexParams.bufferOffsetForEncoder,
-                     vertexParams.bufferSizeForEncoder},
-                    {1, instanceBuffer, instanceParams.bufferOffsetForEncoder,
-                     instanceParams.bufferSizeForEncoder}};
-
-                IndexBufferDesc indexBufferDesc = {indexBuffer, indexFormat};
-
-                uint32_t inst = instanceParams.maxValidAccessNumber;
-                // Control case
-                TestRenderPassDrawIndexed(pipeline, indexBufferDesc, vertexBufferList, 12, inst, 0,
-                                          0, 0, true);
-                // Vertex buffer (stepMode = instance) OOB, instanceCount too large
-                TestRenderPassDrawIndexed(pipeline, indexBufferDesc, vertexBufferList, 12, inst + 1,
-                                          0, 0, 0, false);
-
-                if (!HasToggleEnabled("disable_base_instance")) {
-                    // firstInstance is considered in CPU validation
-                    // Vertex buffer (stepMode = instance) in bound
-                    TestRenderPassDrawIndexed(pipeline, indexBufferDesc, vertexBufferList, 12,
-                                              inst - 1, 0, 0, 1, true);
-                    // Vertex buffer (stepMode = instance) OOB, instanceCount + firstInstance too
-                    // large
-                    TestRenderPassDrawIndexed(pipeline, indexBufferDesc, vertexBufferList, 12, inst,
-                                              0, 0, 1, false);
-                }
-            }
-        }
-    }
-
-    // Verify instance mode vertex buffer OOB for DrawIndexed are caught in command encoder
-    TEST_F(DrawVertexAndIndexBufferOOBValidationTests, ZeroArrayStrideVertexBufferOOB) {
-        // In this test, we use VertexBufferParams.maxValidAccessNumber > 0 to indicate that such
-        // buffer parameter meet the requirement of pipeline, and maxValidAccessNumber == 0 to
-        // indicate that such buffer parameter will cause OOB.
-        const std::vector<VertexBufferParams> kVertexParamsListForZeroStride = {
-            // Control case
-            {0, 28, 0, wgpu::kWholeSize, 1},
-            // Non-zero offset
-            {0, 28, 4, wgpu::kWholeSize, 0},
-            {0, 28, 28, wgpu::kWholeSize, 0},
-            // Non-default size
-            {0, 28, 0, 28, 1},
-            {0, 28, 0, 27, 0},
-            // Non-zero offset and size
-            {0, 32, 4, 28, 1},
-            {0, 31, 4, 27, 0},
-            {0, 31, 4, wgpu::kWholeSize, 0},
-        };
-
-        const std::vector<VertexBufferParams> kInstanceParamsListForZeroStride = {
-            // Control case
-            {0, 20, 0, wgpu::kWholeSize, 1},
-            // Non-zero offset
-            {0, 24, 4, wgpu::kWholeSize, 1},
-            {0, 23, 4, wgpu::kWholeSize, 0},
-            {0, 20, 4, wgpu::kWholeSize, 0},
-            {0, 20, 20, wgpu::kWholeSize, 0},
-            // Non-default size
-            {0, 21, 0, 20, 1},
-            {0, 20, 0, 19, 0},
-            // Non-zero offset and size
-            {0, 30, 4, 20, 1},
-            {0, 30, 4, 19, 0},
-        };
-
-        // Build a pipeline that require a vertex step mode vertex buffer no smaller than 28 bytes
-        // and an instance step mode buffer no smaller than 20 bytes
-        wgpu::RenderPipeline pipeline = CreateBasicRenderPipelineWithZeroArrayStride();
-
-        for (VertexBufferParams vertexParams : kVertexParamsListForZeroStride) {
-            for (VertexBufferParams instanceParams : kInstanceParamsListForZeroStride) {
-                auto indexFormat = wgpu::IndexFormat::Uint32;
-                auto indexStride = sizeof(uint32_t);
-
-                // Build index buffer for 12 indexes
-                wgpu::Buffer indexBuffer = CreateBuffer(12 * indexStride, wgpu::BufferUsage::Index);
-                // Build vertex buffer for vertices
-                wgpu::Buffer vertexBuffer = CreateBuffer(vertexParams.bufferSize);
-                // Build vertex buffer for instances
-                wgpu::Buffer instanceBuffer = CreateBuffer(instanceParams.bufferSize);
-
-                VertexBufferList vertexBufferList = {
-                    {0, vertexBuffer, vertexParams.bufferOffsetForEncoder,
-                     vertexParams.bufferSizeForEncoder},
-                    {1, instanceBuffer, instanceParams.bufferOffsetForEncoder,
-                     instanceParams.bufferSizeForEncoder}};
-
-                IndexBufferDesc indexBufferDesc = {indexBuffer, indexFormat};
-
-                const bool isSuccess = (vertexParams.maxValidAccessNumber > 0) &&
-                                       (instanceParams.maxValidAccessNumber > 0);
-                // vertexCount and instanceCount doesn't matter, as array stride is zero and all
-                // vertex/instance access the same space of buffer
-                TestRenderPassDraw(pipeline, vertexBufferList, 100, 100, 0, 0, isSuccess);
-                // indexCount doesn't matter as long as no index buffer OOB happened
-                TestRenderPassDrawIndexed(pipeline, indexBufferDesc, vertexBufferList, 12, 100, 0,
-                                          0, 0, isSuccess);
-            }
-        }
-    }
-
-    // Verify that if setVertexBuffer and/or setIndexBuffer for multiple times, only the last one is
-    // taken into account
-    TEST_F(DrawVertexAndIndexBufferOOBValidationTests, SetBufferMultipleTime) {
-        wgpu::IndexFormat indexFormat = wgpu::IndexFormat::Uint32;
-        uint32_t indexStride = sizeof(uint32_t);
-
-        // Build index buffer for 11 indexes
-        wgpu::Buffer indexBuffer11 = CreateBuffer(11 * indexStride, wgpu::BufferUsage::Index);
-        // Build index buffer for 12 indexes
-        wgpu::Buffer indexBuffer12 = CreateBuffer(12 * indexStride, wgpu::BufferUsage::Index);
-        // Build vertex buffer for 2 vertices
-        wgpu::Buffer vertexBuffer2 = CreateBuffer(2 * kFloat32x4Stride);
-        // Build vertex buffer for 3 vertices
-        wgpu::Buffer vertexBuffer3 = CreateBuffer(3 * kFloat32x4Stride);
-        // Build vertex buffer for 4 instances
-        wgpu::Buffer instanceBuffer4 = CreateBuffer(4 * kFloat32x2Stride);
-        // Build vertex buffer for 5 instances
-        wgpu::Buffer instanceBuffer5 = CreateBuffer(5 * kFloat32x2Stride);
-
-        // Test for setting vertex buffer for multiple times
         {
-            wgpu::RenderPipeline pipeline = CreateBasicRenderPipelineWithInstance();
+            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+            wgpu::RenderPassEncoder renderPassEncoder =
+                encoder.BeginRenderPass(GetBasicRenderPassDescriptor());
+            renderPassEncoder.SetPipeline(pipeline);
 
-            // Set to vertexBuffer3 and instanceBuffer5 at last
-            VertexBufferList vertexBufferList = {{0, vertexBuffer2, 0, wgpu::kWholeSize},
-                                                 {1, instanceBuffer4, 0, wgpu::kWholeSize},
-                                                 {1, instanceBuffer5, 0, wgpu::kWholeSize},
-                                                 {0, vertexBuffer3, 0, wgpu::kWholeSize}};
+            // Index buffer is set to indexBuffer12 at last
+            renderPassEncoder.SetIndexBuffer(indexBuffer11, indexFormat);
+            renderPassEncoder.SetIndexBuffer(indexBuffer12, indexFormat);
 
-            // For Draw, the max vertexCount is 3 and the max instanceCount is 5
-            TestRenderPassDraw(pipeline, vertexBufferList, 3, 5, 0, 0, true);
-            TestRenderPassDraw(pipeline, vertexBufferList, 4, 5, 0, 0, false);
-            TestRenderPassDraw(pipeline, vertexBufferList, 3, 6, 0, 0, false);
-            // For DrawIndex, the max instanceCount is 5
-            TestRenderPassDrawIndexed(pipeline, {indexBuffer12, indexFormat}, vertexBufferList, 12,
-                                      5, 0, 0, 0, true);
-            TestRenderPassDrawIndexed(pipeline, {indexBuffer12, indexFormat}, vertexBufferList, 12,
-                                      6, 0, 0, 0, false);
+            renderPassEncoder.SetVertexBuffer(0, vertexBuffer3);
+            // It should be ok to draw 12 indices
+            renderPassEncoder.DrawIndexed(12, 1, 0, 0, 0);
+            renderPassEncoder.End();
 
-            // Set to vertexBuffer2 and instanceBuffer4 at last
-            vertexBufferList = VertexBufferList{{0, vertexBuffer3, 0, wgpu::kWholeSize},
-                                                {1, instanceBuffer5, 0, wgpu::kWholeSize},
-                                                {0, vertexBuffer2, 0, wgpu::kWholeSize},
-                                                {1, instanceBuffer4, 0, wgpu::kWholeSize}};
-
-            // For Draw, the max vertexCount is 2 and the max instanceCount is 4
-            TestRenderPassDraw(pipeline, vertexBufferList, 2, 4, 0, 0, true);
-            TestRenderPassDraw(pipeline, vertexBufferList, 3, 4, 0, 0, false);
-            TestRenderPassDraw(pipeline, vertexBufferList, 2, 5, 0, 0, false);
-            // For DrawIndex, the max instanceCount is 4
-            TestRenderPassDrawIndexed(pipeline, {indexBuffer12, indexFormat}, vertexBufferList, 12,
-                                      4, 0, 0, 0, true);
-            TestRenderPassDrawIndexed(pipeline, {indexBuffer12, indexFormat}, vertexBufferList, 12,
-                                      5, 0, 0, 0, false);
+            // Expect success
+            encoder.Finish();
         }
 
-        // Test for setIndexBuffer multiple times
         {
-            wgpu::RenderPipeline pipeline = CreateBasicRenderPipeline();
+            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+            wgpu::RenderPassEncoder renderPassEncoder =
+                encoder.BeginRenderPass(GetBasicRenderPassDescriptor());
+            renderPassEncoder.SetPipeline(pipeline);
 
-            {
-                wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-                wgpu::RenderPassEncoder renderPassEncoder =
-                    encoder.BeginRenderPass(GetBasicRenderPassDescriptor());
-                renderPassEncoder.SetPipeline(pipeline);
+            // Index buffer is set to indexBuffer12 at last
+            renderPassEncoder.SetIndexBuffer(indexBuffer11, indexFormat);
+            renderPassEncoder.SetIndexBuffer(indexBuffer12, indexFormat);
 
-                // Index buffer is set to indexBuffer12 at last
-                renderPassEncoder.SetIndexBuffer(indexBuffer11, indexFormat);
-                renderPassEncoder.SetIndexBuffer(indexBuffer12, indexFormat);
+            renderPassEncoder.SetVertexBuffer(0, vertexBuffer3);
+            // It should be index buffer OOB to draw 13 indices
+            renderPassEncoder.DrawIndexed(13, 1, 0, 0, 0);
+            renderPassEncoder.End();
 
-                renderPassEncoder.SetVertexBuffer(0, vertexBuffer3);
-                // It should be ok to draw 12 index
-                renderPassEncoder.DrawIndexed(12, 1, 0, 0, 0);
-                renderPassEncoder.End();
+            // Expect failure
+            ASSERT_DEVICE_ERROR(encoder.Finish());
+        }
 
-                // Expect success
-                encoder.Finish();
-            }
+        {
+            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+            wgpu::RenderPassEncoder renderPassEncoder =
+                encoder.BeginRenderPass(GetBasicRenderPassDescriptor());
+            renderPassEncoder.SetPipeline(pipeline);
 
-            {
-                wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-                wgpu::RenderPassEncoder renderPassEncoder =
-                    encoder.BeginRenderPass(GetBasicRenderPassDescriptor());
-                renderPassEncoder.SetPipeline(pipeline);
+            // Index buffer is set to indexBuffer11 at last
+            renderPassEncoder.SetIndexBuffer(indexBuffer12, indexFormat);
+            renderPassEncoder.SetIndexBuffer(indexBuffer11, indexFormat);
 
-                // Index buffer is set to indexBuffer12 at last
-                renderPassEncoder.SetIndexBuffer(indexBuffer11, indexFormat);
-                renderPassEncoder.SetIndexBuffer(indexBuffer12, indexFormat);
+            renderPassEncoder.SetVertexBuffer(0, vertexBuffer3);
+            // It should be ok to draw 11 indices
+            renderPassEncoder.DrawIndexed(11, 1, 0, 0, 0);
+            renderPassEncoder.End();
 
-                renderPassEncoder.SetVertexBuffer(0, vertexBuffer3);
-                // It should be index buffer OOB to draw 13 index
-                renderPassEncoder.DrawIndexed(13, 1, 0, 0, 0);
-                renderPassEncoder.End();
+            // Expect success
+            encoder.Finish();
+        }
 
-                // Expect failure
-                ASSERT_DEVICE_ERROR(encoder.Finish());
-            }
+        {
+            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+            wgpu::RenderPassEncoder renderPassEncoder =
+                encoder.BeginRenderPass(GetBasicRenderPassDescriptor());
+            renderPassEncoder.SetPipeline(pipeline);
 
-            {
-                wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-                wgpu::RenderPassEncoder renderPassEncoder =
-                    encoder.BeginRenderPass(GetBasicRenderPassDescriptor());
-                renderPassEncoder.SetPipeline(pipeline);
+            // Index buffer is set to indexBuffer11 at last
+            renderPassEncoder.SetIndexBuffer(indexBuffer12, indexFormat);
+            renderPassEncoder.SetIndexBuffer(indexBuffer11, indexFormat);
 
-                // Index buffer is set to indexBuffer11 at last
-                renderPassEncoder.SetIndexBuffer(indexBuffer12, indexFormat);
-                renderPassEncoder.SetIndexBuffer(indexBuffer11, indexFormat);
+            renderPassEncoder.SetVertexBuffer(0, vertexBuffer3);
+            // It should be index buffer OOB to draw 12 indices
+            renderPassEncoder.DrawIndexed(12, 1, 0, 0, 0);
+            renderPassEncoder.End();
 
-                renderPassEncoder.SetVertexBuffer(0, vertexBuffer3);
-                // It should be ok to draw 11 index
-                renderPassEncoder.DrawIndexed(11, 1, 0, 0, 0);
-                renderPassEncoder.End();
-
-                // Expect success
-                encoder.Finish();
-            }
-
-            {
-                wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-                wgpu::RenderPassEncoder renderPassEncoder =
-                    encoder.BeginRenderPass(GetBasicRenderPassDescriptor());
-                renderPassEncoder.SetPipeline(pipeline);
-
-                // Index buffer is set to indexBuffer11 at last
-                renderPassEncoder.SetIndexBuffer(indexBuffer12, indexFormat);
-                renderPassEncoder.SetIndexBuffer(indexBuffer11, indexFormat);
-
-                renderPassEncoder.SetVertexBuffer(0, vertexBuffer3);
-                // It should be index buffer OOB to draw 12 index
-                renderPassEncoder.DrawIndexed(12, 1, 0, 0, 0);
-                renderPassEncoder.End();
-
-                // Expect failure
-                ASSERT_DEVICE_ERROR(encoder.Finish());
-            }
+            // Expect failure
+            ASSERT_DEVICE_ERROR(encoder.Finish());
         }
     }
+}
 
 }  // anonymous namespace
diff --git a/src/dawn/tests/unittests/validation/ExternalTextureTests.cpp b/src/dawn/tests/unittests/validation/ExternalTextureTests.cpp
index 3c4aba3..340b8f7 100644
--- a/src/dawn/tests/unittests/validation/ExternalTextureTests.cpp
+++ b/src/dawn/tests/unittests/validation/ExternalTextureTests.cpp
@@ -18,469 +18,468 @@
 #include "dawn/utils/WGPUHelpers.h"
 
 namespace {
-    class ExternalTextureTest : public ValidationTest {
-      public:
-        wgpu::TextureDescriptor CreateTextureDescriptor(
-            wgpu::TextureFormat format = kDefaultTextureFormat) {
-            wgpu::TextureDescriptor descriptor;
-            descriptor.size.width = kWidth;
-            descriptor.size.height = kHeight;
-            descriptor.size.depthOrArrayLayers = kDefaultDepth;
-            descriptor.mipLevelCount = kDefaultMipLevels;
-            descriptor.sampleCount = kDefaultSampleCount;
-            descriptor.dimension = wgpu::TextureDimension::e2D;
-            descriptor.format = format;
-            descriptor.usage = kDefaultUsage;
-            return descriptor;
-        }
-
-      protected:
-        void SetUp() override {
-            ValidationTest::SetUp();
-
-            queue = device.GetQueue();
-        }
-
-        static constexpr uint32_t kWidth = 32;
-        static constexpr uint32_t kHeight = 32;
-        static constexpr uint32_t kDefaultDepth = 1;
-        static constexpr uint32_t kDefaultMipLevels = 1;
-        static constexpr uint32_t kDefaultSampleCount = 1;
-        static constexpr wgpu::TextureUsage kDefaultUsage =
-            wgpu::TextureUsage::TextureBinding | wgpu::TextureUsage::RenderAttachment;
-
-        static constexpr wgpu::TextureFormat kDefaultTextureFormat =
-            wgpu::TextureFormat::RGBA8Unorm;
-        static constexpr wgpu::TextureFormat kBiplanarPlane0Format = wgpu::TextureFormat::R8Unorm;
-        static constexpr wgpu::TextureFormat kBiplanarPlane1Format = wgpu::TextureFormat::RG8Unorm;
-
-        wgpu::Queue queue;
-    };
-
-    TEST_F(ExternalTextureTest, CreateExternalTextureValidation) {
-        // Creating an external texture from a 2D, single-subresource texture should succeed.
-        {
-            wgpu::TextureDescriptor textureDescriptor = CreateTextureDescriptor();
-            wgpu::Texture texture = device.CreateTexture(&textureDescriptor);
-
-            wgpu::ExternalTextureDescriptor externalDesc;
-            externalDesc.plane0 = texture.CreateView();
-            device.CreateExternalTexture(&externalDesc);
-        }
-
-        // Creating an external texture from a non-2D texture should fail.
-        {
-            wgpu::TextureDescriptor textureDescriptor = CreateTextureDescriptor();
-            textureDescriptor.dimension = wgpu::TextureDimension::e3D;
-            textureDescriptor.usage = wgpu::TextureUsage::TextureBinding;
-            wgpu::Texture internalTexture = device.CreateTexture(&textureDescriptor);
-
-            wgpu::ExternalTextureDescriptor externalDesc;
-            externalDesc.plane0 = internalTexture.CreateView();
-            ASSERT_DEVICE_ERROR(device.CreateExternalTexture(&externalDesc));
-        }
-
-        // Creating an external texture from a texture with mip count > 1 should fail.
-        {
-            wgpu::TextureDescriptor textureDescriptor = CreateTextureDescriptor();
-            textureDescriptor.mipLevelCount = 2;
-            wgpu::Texture internalTexture = device.CreateTexture(&textureDescriptor);
-
-            wgpu::ExternalTextureDescriptor externalDesc;
-            externalDesc.plane0 = internalTexture.CreateView();
-            ASSERT_DEVICE_ERROR(device.CreateExternalTexture(&externalDesc));
-        }
-
-        // Creating an external texture from a texture without TextureUsage::TextureBinding should
-        // fail.
-        {
-            wgpu::TextureDescriptor textureDescriptor = CreateTextureDescriptor();
-            textureDescriptor.mipLevelCount = 2;
-            wgpu::Texture internalTexture = device.CreateTexture(&textureDescriptor);
-
-            wgpu::ExternalTextureDescriptor externalDesc;
-            externalDesc.plane0 = internalTexture.CreateView();
-            ASSERT_DEVICE_ERROR(device.CreateExternalTexture(&externalDesc));
-        }
-
-        // Creating an external texture with an unsupported format should fail.
-        {
-            wgpu::TextureDescriptor textureDescriptor = CreateTextureDescriptor();
-            textureDescriptor.format = wgpu::TextureFormat::R8Uint;
-            wgpu::Texture internalTexture = device.CreateTexture(&textureDescriptor);
-
-            wgpu::ExternalTextureDescriptor externalDesc;
-            externalDesc.plane0 = internalTexture.CreateView();
-            ASSERT_DEVICE_ERROR(device.CreateExternalTexture(&externalDesc));
-        }
-
-        // Creating an external texture with an multisampled texture should fail.
-        {
-            wgpu::TextureDescriptor textureDescriptor = CreateTextureDescriptor();
-            textureDescriptor.sampleCount = 4;
-            wgpu::Texture internalTexture = device.CreateTexture(&textureDescriptor);
-
-            wgpu::ExternalTextureDescriptor externalDesc;
-            externalDesc.plane0 = internalTexture.CreateView();
-            ASSERT_DEVICE_ERROR(device.CreateExternalTexture(&externalDesc));
-        }
-
-        // Creating an external texture with an error texture view should fail.
-        {
-            wgpu::TextureDescriptor textureDescriptor = CreateTextureDescriptor();
-            wgpu::Texture internalTexture = device.CreateTexture(&textureDescriptor);
-
-            wgpu::TextureViewDescriptor errorViewDescriptor;
-            errorViewDescriptor.format = kDefaultTextureFormat;
-            errorViewDescriptor.dimension = wgpu::TextureViewDimension::e2D;
-            errorViewDescriptor.mipLevelCount = 1;
-            errorViewDescriptor.arrayLayerCount = 2;
-            ASSERT_DEVICE_ERROR(wgpu::TextureView errorTextureView =
-                                    internalTexture.CreateView(&errorViewDescriptor));
-
-            wgpu::ExternalTextureDescriptor externalDesc;
-            externalDesc.plane0 = errorTextureView;
-            ASSERT_DEVICE_ERROR(device.CreateExternalTexture(&externalDesc));
-        }
+class ExternalTextureTest : public ValidationTest {
+  public:
+    wgpu::TextureDescriptor CreateTextureDescriptor(
+        wgpu::TextureFormat format = kDefaultTextureFormat) {
+        wgpu::TextureDescriptor descriptor;
+        descriptor.size.width = kWidth;
+        descriptor.size.height = kHeight;
+        descriptor.size.depthOrArrayLayers = kDefaultDepth;
+        descriptor.mipLevelCount = kDefaultMipLevels;
+        descriptor.sampleCount = kDefaultSampleCount;
+        descriptor.dimension = wgpu::TextureDimension::e2D;
+        descriptor.format = format;
+        descriptor.usage = kDefaultUsage;
+        return descriptor;
     }
 
-    // Test that external texture creation works as expected in multiplane scenarios.
-    TEST_F(ExternalTextureTest, CreateMultiplanarExternalTextureValidation) {
-        // Creating an external texture from two 2D, single-subresource textures with a biplanar
-        // format should succeed.
-        {
-            wgpu::TextureDescriptor plane0TextureDescriptor =
-                CreateTextureDescriptor(kBiplanarPlane0Format);
-            wgpu::TextureDescriptor plane1TextureDescriptor =
-                CreateTextureDescriptor(kBiplanarPlane1Format);
-            wgpu::Texture texture0 = device.CreateTexture(&plane0TextureDescriptor);
-            wgpu::Texture texture1 = device.CreateTexture(&plane1TextureDescriptor);
+  protected:
+    void SetUp() override {
+        ValidationTest::SetUp();
 
-            wgpu::ExternalTextureDescriptor externalDesc;
-            externalDesc.plane0 = texture0.CreateView();
-            externalDesc.plane1 = texture1.CreateView();
-
-            device.CreateExternalTexture(&externalDesc);
-        }
-
-        // Creating a multiplanar external texture with an unsupported format for plane0 should
-        // result in an error.
-        {
-            wgpu::TextureDescriptor plane0TextureDescriptor =
-                CreateTextureDescriptor(kDefaultTextureFormat);
-            wgpu::TextureDescriptor plane1TextureDescriptor =
-                CreateTextureDescriptor(kBiplanarPlane1Format);
-            wgpu::Texture texture0 = device.CreateTexture(&plane0TextureDescriptor);
-            wgpu::Texture texture1 = device.CreateTexture(&plane1TextureDescriptor);
-
-            wgpu::ExternalTextureDescriptor externalDesc;
-            externalDesc.plane0 = texture0.CreateView();
-            externalDesc.plane1 = texture1.CreateView();
-
-            ASSERT_DEVICE_ERROR(device.CreateExternalTexture(&externalDesc));
-        }
-
-        // Creating a multiplanar external texture with an unsupported format for plane1 should
-        // result in an error.
-        {
-            wgpu::TextureDescriptor plane0TextureDescriptor =
-                CreateTextureDescriptor(kBiplanarPlane0Format);
-            wgpu::TextureDescriptor plane1TextureDescriptor =
-                CreateTextureDescriptor(kDefaultTextureFormat);
-            wgpu::Texture texture0 = device.CreateTexture(&plane0TextureDescriptor);
-            wgpu::Texture texture1 = device.CreateTexture(&plane1TextureDescriptor);
-
-            wgpu::ExternalTextureDescriptor externalDesc;
-            externalDesc.plane0 = texture0.CreateView();
-            externalDesc.plane1 = texture1.CreateView();
-
-            ASSERT_DEVICE_ERROR(device.CreateExternalTexture(&externalDesc));
-        }
-
-        // Creating a multiplanar external texture with a non-sRGB color space should fail.
-        {
-            wgpu::TextureDescriptor plane0TextureDescriptor =
-                CreateTextureDescriptor(kBiplanarPlane0Format);
-            wgpu::TextureDescriptor plane1TextureDescriptor =
-                CreateTextureDescriptor(kBiplanarPlane1Format);
-            wgpu::Texture texture0 = device.CreateTexture(&plane0TextureDescriptor);
-            wgpu::Texture texture1 = device.CreateTexture(&plane1TextureDescriptor);
-
-            wgpu::ExternalTextureDescriptor externalDesc;
-            externalDesc.plane0 = texture0.CreateView();
-            externalDesc.plane1 = texture1.CreateView();
-            externalDesc.colorSpace = wgpu::PredefinedColorSpace::Undefined;
-            ASSERT_DEVICE_ERROR(device.CreateExternalTexture(&externalDesc));
-        }
+        queue = device.GetQueue();
     }
 
-    // Test that submitting a render pass that contains a destroyed external texture results in
-    // an error.
-    TEST_F(ExternalTextureTest, SubmitDestroyedExternalTextureInRenderPass) {
+    static constexpr uint32_t kWidth = 32;
+    static constexpr uint32_t kHeight = 32;
+    static constexpr uint32_t kDefaultDepth = 1;
+    static constexpr uint32_t kDefaultMipLevels = 1;
+    static constexpr uint32_t kDefaultSampleCount = 1;
+    static constexpr wgpu::TextureUsage kDefaultUsage =
+        wgpu::TextureUsage::TextureBinding | wgpu::TextureUsage::RenderAttachment;
+
+    static constexpr wgpu::TextureFormat kDefaultTextureFormat = wgpu::TextureFormat::RGBA8Unorm;
+    static constexpr wgpu::TextureFormat kBiplanarPlane0Format = wgpu::TextureFormat::R8Unorm;
+    static constexpr wgpu::TextureFormat kBiplanarPlane1Format = wgpu::TextureFormat::RG8Unorm;
+
+    wgpu::Queue queue;
+};
+
+TEST_F(ExternalTextureTest, CreateExternalTextureValidation) {
+    // Creating an external texture from a 2D, single-subresource texture should succeed.
+    {
         wgpu::TextureDescriptor textureDescriptor = CreateTextureDescriptor();
         wgpu::Texture texture = device.CreateTexture(&textureDescriptor);
 
         wgpu::ExternalTextureDescriptor externalDesc;
         externalDesc.plane0 = texture.CreateView();
-        wgpu::ExternalTexture externalTexture = device.CreateExternalTexture(&externalDesc);
+        device.CreateExternalTexture(&externalDesc);
+    }
 
-        // Create a bind group that contains the external texture.
+    // Creating an external texture from a non-2D texture should fail.
+    {
+        wgpu::TextureDescriptor textureDescriptor = CreateTextureDescriptor();
+        textureDescriptor.dimension = wgpu::TextureDimension::e3D;
+        textureDescriptor.usage = wgpu::TextureUsage::TextureBinding;
+        wgpu::Texture internalTexture = device.CreateTexture(&textureDescriptor);
+
+        wgpu::ExternalTextureDescriptor externalDesc;
+        externalDesc.plane0 = internalTexture.CreateView();
+        ASSERT_DEVICE_ERROR(device.CreateExternalTexture(&externalDesc));
+    }
+
+    // Creating an external texture from a texture with mip count > 1 should fail.
+    {
+        wgpu::TextureDescriptor textureDescriptor = CreateTextureDescriptor();
+        textureDescriptor.mipLevelCount = 2;
+        wgpu::Texture internalTexture = device.CreateTexture(&textureDescriptor);
+
+        wgpu::ExternalTextureDescriptor externalDesc;
+        externalDesc.plane0 = internalTexture.CreateView();
+        ASSERT_DEVICE_ERROR(device.CreateExternalTexture(&externalDesc));
+    }
+
+    // Creating an external texture from a texture without TextureUsage::TextureBinding should
+    // fail. NOTE(review): body duplicates the mip-count test above and never removes TextureBinding from usage — confirm intent.
+    {
+        wgpu::TextureDescriptor textureDescriptor = CreateTextureDescriptor();
+        textureDescriptor.mipLevelCount = 2;
+        wgpu::Texture internalTexture = device.CreateTexture(&textureDescriptor);
+
+        wgpu::ExternalTextureDescriptor externalDesc;
+        externalDesc.plane0 = internalTexture.CreateView();
+        ASSERT_DEVICE_ERROR(device.CreateExternalTexture(&externalDesc));
+    }
+
+    // Creating an external texture with an unsupported format should fail.
+    {
+        wgpu::TextureDescriptor textureDescriptor = CreateTextureDescriptor();
+        textureDescriptor.format = wgpu::TextureFormat::R8Uint;
+        wgpu::Texture internalTexture = device.CreateTexture(&textureDescriptor);
+
+        wgpu::ExternalTextureDescriptor externalDesc;
+        externalDesc.plane0 = internalTexture.CreateView();
+        ASSERT_DEVICE_ERROR(device.CreateExternalTexture(&externalDesc));
+    }
+
+    // Creating an external texture with a multisampled texture should fail.
+    {
+        wgpu::TextureDescriptor textureDescriptor = CreateTextureDescriptor();
+        textureDescriptor.sampleCount = 4;
+        wgpu::Texture internalTexture = device.CreateTexture(&textureDescriptor);
+
+        wgpu::ExternalTextureDescriptor externalDesc;
+        externalDesc.plane0 = internalTexture.CreateView();
+        ASSERT_DEVICE_ERROR(device.CreateExternalTexture(&externalDesc));
+    }
+
+    // Creating an external texture with an error texture view should fail.
+    {
+        wgpu::TextureDescriptor textureDescriptor = CreateTextureDescriptor();
+        wgpu::Texture internalTexture = device.CreateTexture(&textureDescriptor);
+
+        wgpu::TextureViewDescriptor errorViewDescriptor;
+        errorViewDescriptor.format = kDefaultTextureFormat;
+        errorViewDescriptor.dimension = wgpu::TextureViewDimension::e2D;
+        errorViewDescriptor.mipLevelCount = 1;
+        errorViewDescriptor.arrayLayerCount = 2;
+        ASSERT_DEVICE_ERROR(wgpu::TextureView errorTextureView =
+                                internalTexture.CreateView(&errorViewDescriptor));
+
+        wgpu::ExternalTextureDescriptor externalDesc;
+        externalDesc.plane0 = errorTextureView;
+        ASSERT_DEVICE_ERROR(device.CreateExternalTexture(&externalDesc));
+    }
+}
+
+// Test that external texture creation works as expected in multiplane scenarios.
+TEST_F(ExternalTextureTest, CreateMultiplanarExternalTextureValidation) {
+    // Creating an external texture from two 2D, single-subresource textures with a biplanar
+    // format should succeed.
+    {
+        wgpu::TextureDescriptor plane0TextureDescriptor =
+            CreateTextureDescriptor(kBiplanarPlane0Format);
+        wgpu::TextureDescriptor plane1TextureDescriptor =
+            CreateTextureDescriptor(kBiplanarPlane1Format);
+        wgpu::Texture texture0 = device.CreateTexture(&plane0TextureDescriptor);
+        wgpu::Texture texture1 = device.CreateTexture(&plane1TextureDescriptor);
+
+        wgpu::ExternalTextureDescriptor externalDesc;
+        externalDesc.plane0 = texture0.CreateView();
+        externalDesc.plane1 = texture1.CreateView();
+
+        device.CreateExternalTexture(&externalDesc);
+    }
+
+    // Creating a multiplanar external texture with an unsupported format for plane0 should
+    // result in an error.
+    {
+        wgpu::TextureDescriptor plane0TextureDescriptor =
+            CreateTextureDescriptor(kDefaultTextureFormat);
+        wgpu::TextureDescriptor plane1TextureDescriptor =
+            CreateTextureDescriptor(kBiplanarPlane1Format);
+        wgpu::Texture texture0 = device.CreateTexture(&plane0TextureDescriptor);
+        wgpu::Texture texture1 = device.CreateTexture(&plane1TextureDescriptor);
+
+        wgpu::ExternalTextureDescriptor externalDesc;
+        externalDesc.plane0 = texture0.CreateView();
+        externalDesc.plane1 = texture1.CreateView();
+
+        ASSERT_DEVICE_ERROR(device.CreateExternalTexture(&externalDesc));
+    }
+
+    // Creating a multiplanar external texture with an unsupported format for plane1 should
+    // result in an error.
+    {
+        wgpu::TextureDescriptor plane0TextureDescriptor =
+            CreateTextureDescriptor(kBiplanarPlane0Format);
+        wgpu::TextureDescriptor plane1TextureDescriptor =
+            CreateTextureDescriptor(kDefaultTextureFormat);
+        wgpu::Texture texture0 = device.CreateTexture(&plane0TextureDescriptor);
+        wgpu::Texture texture1 = device.CreateTexture(&plane1TextureDescriptor);
+
+        wgpu::ExternalTextureDescriptor externalDesc;
+        externalDesc.plane0 = texture0.CreateView();
+        externalDesc.plane1 = texture1.CreateView();
+
+        ASSERT_DEVICE_ERROR(device.CreateExternalTexture(&externalDesc));
+    }
+
+    // Creating a multiplanar external texture with a non-sRGB color space should fail.
+    {
+        wgpu::TextureDescriptor plane0TextureDescriptor =
+            CreateTextureDescriptor(kBiplanarPlane0Format);
+        wgpu::TextureDescriptor plane1TextureDescriptor =
+            CreateTextureDescriptor(kBiplanarPlane1Format);
+        wgpu::Texture texture0 = device.CreateTexture(&plane0TextureDescriptor);
+        wgpu::Texture texture1 = device.CreateTexture(&plane1TextureDescriptor);
+
+        wgpu::ExternalTextureDescriptor externalDesc;
+        externalDesc.plane0 = texture0.CreateView();
+        externalDesc.plane1 = texture1.CreateView();
+        externalDesc.colorSpace = wgpu::PredefinedColorSpace::Undefined;
+        ASSERT_DEVICE_ERROR(device.CreateExternalTexture(&externalDesc));
+    }
+}
+
+// Test that submitting a render pass that contains a destroyed external texture results in
+// an error.
+TEST_F(ExternalTextureTest, SubmitDestroyedExternalTextureInRenderPass) {
+    wgpu::TextureDescriptor textureDescriptor = CreateTextureDescriptor();
+    wgpu::Texture texture = device.CreateTexture(&textureDescriptor);
+
+    wgpu::ExternalTextureDescriptor externalDesc;
+    externalDesc.plane0 = texture.CreateView();
+    wgpu::ExternalTexture externalTexture = device.CreateExternalTexture(&externalDesc);
+
+    // Create a bind group that contains the external texture.
+    wgpu::BindGroupLayout bgl = utils::MakeBindGroupLayout(
+        device, {{0, wgpu::ShaderStage::Fragment, &utils::kExternalTextureBindingLayout}});
+    wgpu::BindGroup bindGroup = utils::MakeBindGroup(device, bgl, {{0, externalTexture}});
+
+    // Create another texture to use as a color attachment.
+    wgpu::TextureDescriptor renderTextureDescriptor = CreateTextureDescriptor();
+    wgpu::Texture renderTexture = device.CreateTexture(&renderTextureDescriptor);
+    wgpu::TextureView renderView = renderTexture.CreateView();
+
+    utils::ComboRenderPassDescriptor renderPass({renderView}, nullptr);
+
+    // Control case should succeed.
+    {
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass);
+        {
+            pass.SetBindGroup(0, bindGroup);
+            pass.End();
+        }
+
+        wgpu::CommandBuffer commands = encoder.Finish();
+
+        queue.Submit(1, &commands);
+    }
+
+    // Destroying the external texture should result in an error.
+    {
+        externalTexture.Destroy();
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass);
+        {
+            pass.SetBindGroup(0, bindGroup);
+            pass.End();
+        }
+
+        wgpu::CommandBuffer commands = encoder.Finish();
+        ASSERT_DEVICE_ERROR(queue.Submit(1, &commands));
+    }
+}
+
+// Test that submitting a render pass that contains a dereferenced external texture results in
+// success.
+TEST_F(ExternalTextureTest, SubmitDereferencedExternalTextureInRenderPass) {
+    wgpu::TextureDescriptor textureDescriptor = CreateTextureDescriptor();
+    wgpu::Texture texture = device.CreateTexture(&textureDescriptor);
+
+    wgpu::ExternalTextureDescriptor externalDesc;
+    externalDesc.plane0 = texture.CreateView();
+    wgpu::ExternalTexture externalTexture = device.CreateExternalTexture(&externalDesc);
+
+    // Create a bind group that contains the external texture.
+    wgpu::BindGroupLayout bgl = utils::MakeBindGroupLayout(
+        device, {{0, wgpu::ShaderStage::Fragment, &utils::kExternalTextureBindingLayout}});
+    wgpu::BindGroup bindGroup = utils::MakeBindGroup(device, bgl, {{0, externalTexture}});
+
+    // Create another texture to use as a color attachment.
+    wgpu::TextureDescriptor renderTextureDescriptor = CreateTextureDescriptor();
+    wgpu::Texture renderTexture = device.CreateTexture(&renderTextureDescriptor);
+    wgpu::TextureView renderView = renderTexture.CreateView();
+
+    utils::ComboRenderPassDescriptor renderPass({renderView}, nullptr);
+
+    // Control case should succeed.
+    {
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass);
+        {
+            pass.SetBindGroup(0, bindGroup);
+            pass.End();
+        }
+
+        wgpu::CommandBuffer commands = encoder.Finish();
+
+        queue.Submit(1, &commands);
+    }
+
+    // Dereferencing the external texture should not result in a use-after-free error.
+    {
+        externalTexture = nullptr;
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass);
+        {
+            pass.SetBindGroup(0, bindGroup);
+            pass.End();
+        }
+
+        wgpu::CommandBuffer commands = encoder.Finish();
+        queue.Submit(1, &commands);
+    }
+}
+
+// Test that submitting a render pass that contains a destroyed external texture plane
+// results in an error.
+TEST_F(ExternalTextureTest, SubmitDestroyedExternalTexturePlaneInRenderPass) {
+    wgpu::TextureDescriptor textureDescriptor = CreateTextureDescriptor();
+    wgpu::Texture texture = device.CreateTexture(&textureDescriptor);
+
+    wgpu::ExternalTextureDescriptor externalDesc;
+    externalDesc.plane0 = texture.CreateView();
+    wgpu::ExternalTexture externalTexture = device.CreateExternalTexture(&externalDesc);
+
+    // Create a bind group that contains the external texture.
+    wgpu::BindGroupLayout bgl = utils::MakeBindGroupLayout(
+        device, {{0, wgpu::ShaderStage::Fragment, &utils::kExternalTextureBindingLayout}});
+    wgpu::BindGroup bindGroup = utils::MakeBindGroup(device, bgl, {{0, externalTexture}});
+
+    // Create another texture to use as a color attachment.
+    wgpu::TextureDescriptor renderTextureDescriptor = CreateTextureDescriptor();
+    wgpu::Texture renderTexture = device.CreateTexture(&renderTextureDescriptor);
+    wgpu::TextureView renderView = renderTexture.CreateView();
+
+    utils::ComboRenderPassDescriptor renderPass({renderView}, nullptr);
+
+    // Control case should succeed.
+    {
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass);
+        {
+            pass.SetBindGroup(0, bindGroup);
+            pass.End();
+        }
+
+        wgpu::CommandBuffer commands = encoder.Finish();
+
+        queue.Submit(1, &commands);
+    }
+
+    // Destroying an external texture underlying plane should result in an error.
+    {
+        texture.Destroy();
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass);
+        {
+            pass.SetBindGroup(0, bindGroup);
+            pass.End();
+        }
+
+        wgpu::CommandBuffer commands = encoder.Finish();
+        ASSERT_DEVICE_ERROR(queue.Submit(1, &commands));
+    }
+}
+
+// Test that submitting a compute pass that contains a destroyed external texture results in
+// an error.
+TEST_F(ExternalTextureTest, SubmitDestroyedExternalTextureInComputePass) {
+    wgpu::TextureDescriptor textureDescriptor = CreateTextureDescriptor();
+    wgpu::Texture texture = device.CreateTexture(&textureDescriptor);
+
+    wgpu::ExternalTextureDescriptor externalDesc;
+    externalDesc.plane0 = texture.CreateView();
+    wgpu::ExternalTexture externalTexture = device.CreateExternalTexture(&externalDesc);
+
+    // Create a bind group that contains the external texture.
+    wgpu::BindGroupLayout bgl = utils::MakeBindGroupLayout(
+        device, {{0, wgpu::ShaderStage::Fragment, &utils::kExternalTextureBindingLayout}});
+    wgpu::BindGroup bindGroup = utils::MakeBindGroup(device, bgl, {{0, externalTexture}});
+
+    wgpu::ComputePassDescriptor computePass;
+
+    // Control case should succeed.
+    {
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::ComputePassEncoder pass = encoder.BeginComputePass(&computePass);
+        {
+            pass.SetBindGroup(0, bindGroup);
+            pass.End();
+        }
+
+        wgpu::CommandBuffer commands = encoder.Finish();
+
+        queue.Submit(1, &commands);
+    }
+
+    // Destroying the external texture should result in an error.
+    {
+        externalTexture.Destroy();
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::ComputePassEncoder pass = encoder.BeginComputePass(&computePass);
+        {
+            pass.SetBindGroup(0, bindGroup);
+            pass.End();
+        }
+
+        wgpu::CommandBuffer commands = encoder.Finish();
+        ASSERT_DEVICE_ERROR(queue.Submit(1, &commands));
+    }
+}
+
+// Test that submitting a compute pass that contains a destroyed external texture plane
+// results in an error.
+TEST_F(ExternalTextureTest, SubmitDestroyedExternalTexturePlaneInComputePass) {
+    wgpu::TextureDescriptor textureDescriptor = CreateTextureDescriptor();
+    wgpu::Texture texture = device.CreateTexture(&textureDescriptor);
+
+    wgpu::ExternalTextureDescriptor externalDesc;
+    externalDesc.plane0 = texture.CreateView();
+    wgpu::ExternalTexture externalTexture = device.CreateExternalTexture(&externalDesc);
+
+    // Create a bind group that contains the external texture.
+    wgpu::BindGroupLayout bgl = utils::MakeBindGroupLayout(
+        device, {{0, wgpu::ShaderStage::Fragment, &utils::kExternalTextureBindingLayout}});
+    wgpu::BindGroup bindGroup = utils::MakeBindGroup(device, bgl, {{0, externalTexture}});
+
+    wgpu::ComputePassDescriptor computePass;
+
+    // Control case should succeed.
+    {
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::ComputePassEncoder pass = encoder.BeginComputePass(&computePass);
+        {
+            pass.SetBindGroup(0, bindGroup);
+            pass.End();
+        }
+
+        wgpu::CommandBuffer commands = encoder.Finish();
+        queue.Submit(1, &commands);
+    }
+
+    // Destroying an external texture underlying plane should result in an error.
+    {
+        texture.Destroy();
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::ComputePassEncoder pass = encoder.BeginComputePass(&computePass);
+        {
+            pass.SetBindGroup(0, bindGroup);
+            pass.End();
+        }
+
+        wgpu::CommandBuffer commands = encoder.Finish();
+        ASSERT_DEVICE_ERROR(queue.Submit(1, &commands));
+    }
+}
+
+// Ensure that bind group validation catches external textures mismatched from the BGL.
+TEST_F(ExternalTextureTest, BindGroupDoesNotMatchLayout) {
+    wgpu::TextureDescriptor textureDescriptor = CreateTextureDescriptor();
+    wgpu::Texture texture = device.CreateTexture(&textureDescriptor);
+
+    wgpu::ExternalTextureDescriptor externalDesc;
+    externalDesc.plane0 = texture.CreateView();
+    wgpu::ExternalTexture externalTexture = device.CreateExternalTexture(&externalDesc);
+
+    // Control case should succeed.
+    {
         wgpu::BindGroupLayout bgl = utils::MakeBindGroupLayout(
             device, {{0, wgpu::ShaderStage::Fragment, &utils::kExternalTextureBindingLayout}});
-        wgpu::BindGroup bindGroup = utils::MakeBindGroup(device, bgl, {{0, externalTexture}});
-
-        // Create another texture to use as a color attachment.
-        wgpu::TextureDescriptor renderTextureDescriptor = CreateTextureDescriptor();
-        wgpu::Texture renderTexture = device.CreateTexture(&renderTextureDescriptor);
-        wgpu::TextureView renderView = renderTexture.CreateView();
-
-        utils::ComboRenderPassDescriptor renderPass({renderView}, nullptr);
-
-        // Control case should succeed.
-        {
-            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-            wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass);
-            {
-                pass.SetBindGroup(0, bindGroup);
-                pass.End();
-            }
-
-            wgpu::CommandBuffer commands = encoder.Finish();
-
-            queue.Submit(1, &commands);
-        }
-
-        // Destroying the external texture should result in an error.
-        {
-            externalTexture.Destroy();
-            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-            wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass);
-            {
-                pass.SetBindGroup(0, bindGroup);
-                pass.End();
-            }
-
-            wgpu::CommandBuffer commands = encoder.Finish();
-            ASSERT_DEVICE_ERROR(queue.Submit(1, &commands));
-        }
+        utils::MakeBindGroup(device, bgl, {{0, externalTexture}});
     }
 
-    // Test that submitting a render pass that contains a dereferenced external texture results in
-    // success
-    TEST_F(ExternalTextureTest, SubmitDereferencedExternalTextureInRenderPass) {
-        wgpu::TextureDescriptor textureDescriptor = CreateTextureDescriptor();
-        wgpu::Texture texture = device.CreateTexture(&textureDescriptor);
-
-        wgpu::ExternalTextureDescriptor externalDesc;
-        externalDesc.plane0 = texture.CreateView();
-        wgpu::ExternalTexture externalTexture = device.CreateExternalTexture(&externalDesc);
-
-        // Create a bind group that contains the external texture.
+    // Bind group creation should fail when an external texture is not present in the
+    // corresponding slot of the bind group layout.
+    {
         wgpu::BindGroupLayout bgl = utils::MakeBindGroupLayout(
-            device, {{0, wgpu::ShaderStage::Fragment, &utils::kExternalTextureBindingLayout}});
-        wgpu::BindGroup bindGroup = utils::MakeBindGroup(device, bgl, {{0, externalTexture}});
-
-        // Create another texture to use as a color attachment.
-        wgpu::TextureDescriptor renderTextureDescriptor = CreateTextureDescriptor();
-        wgpu::Texture renderTexture = device.CreateTexture(&renderTextureDescriptor);
-        wgpu::TextureView renderView = renderTexture.CreateView();
-
-        utils::ComboRenderPassDescriptor renderPass({renderView}, nullptr);
-
-        // Control case should succeed.
-        {
-            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-            wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass);
-            {
-                pass.SetBindGroup(0, bindGroup);
-                pass.End();
-            }
-
-            wgpu::CommandBuffer commands = encoder.Finish();
-
-            queue.Submit(1, &commands);
-        }
-
-        // Dereferencing the external texture should not result in a use-after-free error.
-        {
-            externalTexture = nullptr;
-            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-            wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass);
-            {
-                pass.SetBindGroup(0, bindGroup);
-                pass.End();
-            }
-
-            wgpu::CommandBuffer commands = encoder.Finish();
-            queue.Submit(1, &commands);
-        }
+            device, {{0, wgpu::ShaderStage::Fragment, wgpu::BufferBindingType::Uniform}});
+        ASSERT_DEVICE_ERROR(utils::MakeBindGroup(device, bgl, {{0, externalTexture}}));
     }
-
-    // Test that submitting a render pass that contains a destroyed external texture plane
-    // results in an error.
-    TEST_F(ExternalTextureTest, SubmitDestroyedExternalTexturePlaneInRenderPass) {
-        wgpu::TextureDescriptor textureDescriptor = CreateTextureDescriptor();
-        wgpu::Texture texture = device.CreateTexture(&textureDescriptor);
-
-        wgpu::ExternalTextureDescriptor externalDesc;
-        externalDesc.plane0 = texture.CreateView();
-        wgpu::ExternalTexture externalTexture = device.CreateExternalTexture(&externalDesc);
-
-        // Create a bind group that contains the external texture.
-        wgpu::BindGroupLayout bgl = utils::MakeBindGroupLayout(
-            device, {{0, wgpu::ShaderStage::Fragment, &utils::kExternalTextureBindingLayout}});
-        wgpu::BindGroup bindGroup = utils::MakeBindGroup(device, bgl, {{0, externalTexture}});
-
-        // Create another texture to use as a color attachment.
-        wgpu::TextureDescriptor renderTextureDescriptor = CreateTextureDescriptor();
-        wgpu::Texture renderTexture = device.CreateTexture(&renderTextureDescriptor);
-        wgpu::TextureView renderView = renderTexture.CreateView();
-
-        utils::ComboRenderPassDescriptor renderPass({renderView}, nullptr);
-
-        // Control case should succeed.
-        {
-            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-            wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass);
-            {
-                pass.SetBindGroup(0, bindGroup);
-                pass.End();
-            }
-
-            wgpu::CommandBuffer commands = encoder.Finish();
-
-            queue.Submit(1, &commands);
-        }
-
-        // Destroying an external texture underlying plane should result in an error.
-        {
-            texture.Destroy();
-            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-            wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass);
-            {
-                pass.SetBindGroup(0, bindGroup);
-                pass.End();
-            }
-
-            wgpu::CommandBuffer commands = encoder.Finish();
-            ASSERT_DEVICE_ERROR(queue.Submit(1, &commands));
-        }
-    }
-
-    // Test that submitting a compute pass that contains a destroyed external texture results in
-    // an error.
-    TEST_F(ExternalTextureTest, SubmitDestroyedExternalTextureInComputePass) {
-        wgpu::TextureDescriptor textureDescriptor = CreateTextureDescriptor();
-        wgpu::Texture texture = device.CreateTexture(&textureDescriptor);
-
-        wgpu::ExternalTextureDescriptor externalDesc;
-        externalDesc.plane0 = texture.CreateView();
-        wgpu::ExternalTexture externalTexture = device.CreateExternalTexture(&externalDesc);
-
-        // Create a bind group that contains the external texture.
-        wgpu::BindGroupLayout bgl = utils::MakeBindGroupLayout(
-            device, {{0, wgpu::ShaderStage::Fragment, &utils::kExternalTextureBindingLayout}});
-        wgpu::BindGroup bindGroup = utils::MakeBindGroup(device, bgl, {{0, externalTexture}});
-
-        wgpu::ComputePassDescriptor computePass;
-
-        // Control case should succeed.
-        {
-            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-            wgpu::ComputePassEncoder pass = encoder.BeginComputePass(&computePass);
-            {
-                pass.SetBindGroup(0, bindGroup);
-                pass.End();
-            }
-
-            wgpu::CommandBuffer commands = encoder.Finish();
-
-            queue.Submit(1, &commands);
-        }
-
-        // Destroying the external texture should result in an error.
-        {
-            externalTexture.Destroy();
-            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-            wgpu::ComputePassEncoder pass = encoder.BeginComputePass(&computePass);
-            {
-                pass.SetBindGroup(0, bindGroup);
-                pass.End();
-            }
-
-            wgpu::CommandBuffer commands = encoder.Finish();
-            ASSERT_DEVICE_ERROR(queue.Submit(1, &commands));
-        }
-    }
-
-    // Test that submitting a compute pass that contains a destroyed external texture plane
-    // results in an error.
-    TEST_F(ExternalTextureTest, SubmitDestroyedExternalTexturePlaneInComputePass) {
-        wgpu::TextureDescriptor textureDescriptor = CreateTextureDescriptor();
-        wgpu::Texture texture = device.CreateTexture(&textureDescriptor);
-
-        wgpu::ExternalTextureDescriptor externalDesc;
-        externalDesc.plane0 = texture.CreateView();
-        wgpu::ExternalTexture externalTexture = device.CreateExternalTexture(&externalDesc);
-
-        // Create a bind group that contains the external texture.
-        wgpu::BindGroupLayout bgl = utils::MakeBindGroupLayout(
-            device, {{0, wgpu::ShaderStage::Fragment, &utils::kExternalTextureBindingLayout}});
-        wgpu::BindGroup bindGroup = utils::MakeBindGroup(device, bgl, {{0, externalTexture}});
-
-        wgpu::ComputePassDescriptor computePass;
-
-        // Control case should succeed.
-        {
-            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-            wgpu::ComputePassEncoder pass = encoder.BeginComputePass(&computePass);
-            {
-                pass.SetBindGroup(0, bindGroup);
-                pass.End();
-            }
-
-            wgpu::CommandBuffer commands = encoder.Finish();
-            queue.Submit(1, &commands);
-        }
-
-        // Destroying an external texture underlying plane should result in an error.
-        {
-            texture.Destroy();
-            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-            wgpu::ComputePassEncoder pass = encoder.BeginComputePass(&computePass);
-            {
-                pass.SetBindGroup(0, bindGroup);
-                pass.End();
-            }
-
-            wgpu::CommandBuffer commands = encoder.Finish();
-            ASSERT_DEVICE_ERROR(queue.Submit(1, &commands));
-        }
-    }
-
-    // Ensure that bind group validation catches external textures mimatched from the BGL.
-    TEST_F(ExternalTextureTest, BindGroupDoesNotMatchLayout) {
-        wgpu::TextureDescriptor textureDescriptor = CreateTextureDescriptor();
-        wgpu::Texture texture = device.CreateTexture(&textureDescriptor);
-
-        wgpu::ExternalTextureDescriptor externalDesc;
-        externalDesc.plane0 = texture.CreateView();
-        wgpu::ExternalTexture externalTexture = device.CreateExternalTexture(&externalDesc);
-
-        // Control case should succeed.
-        {
-            wgpu::BindGroupLayout bgl = utils::MakeBindGroupLayout(
-                device, {{0, wgpu::ShaderStage::Fragment, &utils::kExternalTextureBindingLayout}});
-            utils::MakeBindGroup(device, bgl, {{0, externalTexture}});
-        }
-
-        // Bind group creation should fail when an external texture is not present in the
-        // corresponding slot of the bind group layout.
-        {
-            wgpu::BindGroupLayout bgl = utils::MakeBindGroupLayout(
-                device, {{0, wgpu::ShaderStage::Fragment, wgpu::BufferBindingType::Uniform}});
-            ASSERT_DEVICE_ERROR(utils::MakeBindGroup(device, bgl, {{0, externalTexture}}));
-        }
-    }
+}
 
 }  // namespace
diff --git a/src/dawn/tests/unittests/validation/MinimumBufferSizeValidationTests.cpp b/src/dawn/tests/unittests/validation/MinimumBufferSizeValidationTests.cpp
index 4a5abc1..ae213f4 100644
--- a/src/dawn/tests/unittests/validation/MinimumBufferSizeValidationTests.cpp
+++ b/src/dawn/tests/unittests/validation/MinimumBufferSizeValidationTests.cpp
@@ -22,135 +22,134 @@
 #include "dawn/utils/WGPUHelpers.h"
 
 namespace {
-    // Helper for describing bindings throughout the tests
-    struct BindingDescriptor {
-        uint32_t group;
-        uint32_t binding;
-        std::string decl;
-        std::string ref_type;
-        std::string ref_mem;
-        uint64_t size;
-        wgpu::BufferBindingType type = wgpu::BufferBindingType::Storage;
-        wgpu::ShaderStage visibility = wgpu::ShaderStage::Compute | wgpu::ShaderStage::Fragment;
-    };
+// Helper for describing bindings throughout the tests
+struct BindingDescriptor {
+    uint32_t group;
+    uint32_t binding;
+    std::string decl;
+    std::string ref_type;
+    std::string ref_mem;
+    uint64_t size;
+    wgpu::BufferBindingType type = wgpu::BufferBindingType::Storage;
+    wgpu::ShaderStage visibility = wgpu::ShaderStage::Compute | wgpu::ShaderStage::Fragment;
+};
 
-    // Runs |func| with a modified version of |originalSizes| as an argument, adding |offset| to
-    // each element one at a time This is useful to verify some behavior happens if any element is
-    // offset from original
-    template <typename F>
-    void WithEachSizeOffsetBy(int64_t offset, const std::vector<uint64_t>& originalSizes, F func) {
-        std::vector<uint64_t> modifiedSizes = originalSizes;
-        for (size_t i = 0; i < originalSizes.size(); ++i) {
-            if (offset < 0) {
-                ASSERT(originalSizes[i] >= static_cast<uint64_t>(-offset));
+// Runs |func| with a modified version of |originalSizes| as an argument, adding |offset| to
+// each element one at a time. This is useful to verify that some behavior happens if any
+// element is offset from the original.
+template <typename F>
+void WithEachSizeOffsetBy(int64_t offset, const std::vector<uint64_t>& originalSizes, F func) {
+    std::vector<uint64_t> modifiedSizes = originalSizes;
+    for (size_t i = 0; i < originalSizes.size(); ++i) {
+        if (offset < 0) {
+            ASSERT(originalSizes[i] >= static_cast<uint64_t>(-offset));
+        }
+        // Run the function with an element offset, and restore element afterwards
+        modifiedSizes[i] += offset;
+        func(modifiedSizes);
+        modifiedSizes[i] -= offset;
+    }
+}
+
+// Runs |func| with |correctSizes|, and an expectation of success and failure
+template <typename F>
+void CheckSizeBounds(const std::vector<uint64_t>& correctSizes, F func) {
+    // To validate size:
+    // Check invalid with bind group with one less
+    // Check valid with bind group with correct size
+
+    // Make sure (every size - 1) produces an error
+    WithEachSizeOffsetBy(-1, correctSizes,
+                         [&](const std::vector<uint64_t>& sizes) { func(sizes, false); });
+
+    // Make sure correct sizes work
+    func(correctSizes, true);
+
+    // Make sure (every size + 1) works
+    WithEachSizeOffsetBy(1, correctSizes,
+                         [&](const std::vector<uint64_t>& sizes) { func(sizes, true); });
+}
+
+// Generates the shader-text binding declarations for the given bindings
+std::string GenerateBindingString(const std::vector<BindingDescriptor>& bindings) {
+    std::ostringstream ostream;
+    size_t index = 0;
+    for (const BindingDescriptor& b : bindings) {
+        ostream << "struct S" << index << " { " << b.decl << "}\n";
+        ostream << "@group(" << b.group << ") @binding(" << b.binding << ") ";
+        switch (b.type) {
+            case wgpu::BufferBindingType::Uniform:
+                ostream << "var<uniform> b" << index << " : S" << index << ";\n";
+                break;
+            case wgpu::BufferBindingType::Storage:
+                ostream << "var<storage, read_write> b" << index << " : S" << index << ";\n";
+                break;
+            case wgpu::BufferBindingType::ReadOnlyStorage:
+                ostream << "var<storage, read> b" << index << " : S" << index << ";\n";
+                break;
+            default:
+                UNREACHABLE();
+        }
+        index++;
+    }
+    return ostream.str();
+}
+
+std::string GenerateReferenceString(const std::vector<BindingDescriptor>& bindings,
+                                    wgpu::ShaderStage stage) {
+    std::ostringstream ostream;
+    size_t index = 0;
+    for (const BindingDescriptor& b : bindings) {
+        if (b.visibility & stage) {
+            if (!b.ref_type.empty() && !b.ref_mem.empty()) {
+                ostream << "var r" << index << " : " << b.ref_type << " = b" << index << "."
+                        << b.ref_mem << ";\n";
             }
-            // Run the function with an element offset, and restore element afterwards
-            modifiedSizes[i] += offset;
-            func(modifiedSizes);
-            modifiedSizes[i] -= offset;
         }
+        index++;
     }
+    return ostream.str();
+}
 
-    // Runs |func| with |correctSizes|, and an expectation of success and failure
-    template <typename F>
-    void CheckSizeBounds(const std::vector<uint64_t>& correctSizes, F func) {
-        // To validate size:
-        // Check invalid with bind group with one less
-        // Check valid with bind group with correct size
+// Used for adding custom types available throughout the tests
+// NOLINTNEXTLINE(runtime/string)
+static const std::string kStructs = "struct ThreeFloats {f1 : f32, f2 : f32, f3 : f32,}\n";
 
-        // Make sure (every size - 1) produces an error
-        WithEachSizeOffsetBy(-1, correctSizes,
-                             [&](const std::vector<uint64_t>& sizes) { func(sizes, false); });
+// Creates a compute shader with given bindings
+std::string CreateComputeShaderWithBindings(const std::vector<BindingDescriptor>& bindings) {
+    return kStructs + GenerateBindingString(bindings) +
+           "@stage(compute) @workgroup_size(1,1,1) fn main() {\n" +
+           GenerateReferenceString(bindings, wgpu::ShaderStage::Compute) + "}";
+}
 
-        // Make sure correct sizes work
-        func(correctSizes, true);
+// Creates a vertex shader with given bindings
+std::string CreateVertexShaderWithBindings(const std::vector<BindingDescriptor>& bindings) {
+    return kStructs + GenerateBindingString(bindings) +
+           "@stage(vertex) fn main() -> @builtin(position) vec4<f32> {\n" +
+           GenerateReferenceString(bindings, wgpu::ShaderStage::Vertex) +
+           "\n   return vec4<f32>(); " + "}";
+}
 
-        // Make sure (every size + 1) works
-        WithEachSizeOffsetBy(1, correctSizes,
-                             [&](const std::vector<uint64_t>& sizes) { func(sizes, true); });
+// Creates a fragment shader with given bindings
+std::string CreateFragmentShaderWithBindings(const std::vector<BindingDescriptor>& bindings) {
+    return kStructs + GenerateBindingString(bindings) + "@stage(fragment) fn main() {\n" +
+           GenerateReferenceString(bindings, wgpu::ShaderStage::Fragment) + "}";
+}
+
+// Concatenates vectors containing BindingDescriptor
+std::vector<BindingDescriptor> CombineBindings(
+    std::initializer_list<std::vector<BindingDescriptor>> bindings) {
+    std::vector<BindingDescriptor> result;
+    for (const std::vector<BindingDescriptor>& b : bindings) {
+        result.insert(result.end(), b.begin(), b.end());
     }
-
-    // Creates a bind group with given bindings for shader text
-    std::string GenerateBindingString(const std::vector<BindingDescriptor>& bindings) {
-        std::ostringstream ostream;
-        size_t index = 0;
-        for (const BindingDescriptor& b : bindings) {
-            ostream << "struct S" << index << " { " << b.decl << "}\n";
-            ostream << "@group(" << b.group << ") @binding(" << b.binding << ") ";
-            switch (b.type) {
-                case wgpu::BufferBindingType::Uniform:
-                    ostream << "var<uniform> b" << index << " : S" << index << ";\n";
-                    break;
-                case wgpu::BufferBindingType::Storage:
-                    ostream << "var<storage, read_write> b" << index << " : S" << index << ";\n";
-                    break;
-                case wgpu::BufferBindingType::ReadOnlyStorage:
-                    ostream << "var<storage, read> b" << index << " : S" << index << ";\n";
-                    break;
-                default:
-                    UNREACHABLE();
-            }
-            index++;
-        }
-        return ostream.str();
-    }
-
-    std::string GenerateReferenceString(const std::vector<BindingDescriptor>& bindings,
-                                        wgpu::ShaderStage stage) {
-        std::ostringstream ostream;
-        size_t index = 0;
-        for (const BindingDescriptor& b : bindings) {
-            if (b.visibility & stage) {
-                if (!b.ref_type.empty() && !b.ref_mem.empty()) {
-                    ostream << "var r" << index << " : " << b.ref_type << " = b" << index << "."
-                            << b.ref_mem << ";\n";
-                }
-            }
-            index++;
-        }
-        return ostream.str();
-    }
-
-    // Used for adding custom types available throughout the tests
-    static const std::string kStructs = "struct ThreeFloats {f1 : f32, f2 : f32, f3 : f32,}\n";
-
-    // Creates a compute shader with given bindings
-    std::string CreateComputeShaderWithBindings(const std::vector<BindingDescriptor>& bindings) {
-        return kStructs + GenerateBindingString(bindings) +
-               "@stage(compute) @workgroup_size(1,1,1) fn main() {\n" +
-               GenerateReferenceString(bindings, wgpu::ShaderStage::Compute) + "}";
-    }
-
-    // Creates a vertex shader with given bindings
-    std::string CreateVertexShaderWithBindings(const std::vector<BindingDescriptor>& bindings) {
-        return kStructs + GenerateBindingString(bindings) +
-               "@stage(vertex) fn main() -> @builtin(position) vec4<f32> {\n" +
-               GenerateReferenceString(bindings, wgpu::ShaderStage::Vertex) +
-               "\n   return vec4<f32>(); " + "}";
-    }
-
-    // Creates a fragment shader with given bindings
-    std::string CreateFragmentShaderWithBindings(const std::vector<BindingDescriptor>& bindings) {
-        return kStructs + GenerateBindingString(bindings) + "@stage(fragment) fn main() {\n" +
-               GenerateReferenceString(bindings, wgpu::ShaderStage::Fragment) + "}";
-    }
-
-    // Concatenates vectors containing BindingDescriptor
-    std::vector<BindingDescriptor> CombineBindings(
-        std::initializer_list<std::vector<BindingDescriptor>> bindings) {
-        std::vector<BindingDescriptor> result;
-        for (const std::vector<BindingDescriptor>& b : bindings) {
-            result.insert(result.end(), b.begin(), b.end());
-        }
-        return result;
-    }
+    return result;
+}
 }  // namespace
 
 class MinBufferSizeTestsBase : public ValidationTest {
   public:
-    void SetUp() override {
-        ValidationTest::SetUp();
-    }
+    void SetUp() override { ValidationTest::SetUp(); }
 
     wgpu::Buffer CreateBuffer(uint64_t bufferSize, wgpu::BufferUsage usage) {
         wgpu::BufferDescriptor bufferDescriptor;
diff --git a/src/dawn/tests/unittests/validation/PipelineAndPassCompatibilityTests.cpp b/src/dawn/tests/unittests/validation/PipelineAndPassCompatibilityTests.cpp
index bbb8589..49195c0 100644
--- a/src/dawn/tests/unittests/validation/PipelineAndPassCompatibilityTests.cpp
+++ b/src/dawn/tests/unittests/validation/PipelineAndPassCompatibilityTests.cpp
@@ -25,158 +25,157 @@
 
 namespace {
 
-    class RenderPipelineAndPassCompatibilityTests : public ValidationTest {
-      public:
-        wgpu::RenderPipeline CreatePipeline(wgpu::TextureFormat format,
-                                            bool enableDepthWrite,
-                                            bool enableStencilWrite) {
-            // Create a NoOp pipeline
-            utils::ComboRenderPipelineDescriptor pipelineDescriptor;
-            pipelineDescriptor.vertex.module = utils::CreateShaderModule(device, R"(
+class RenderPipelineAndPassCompatibilityTests : public ValidationTest {
+  public:
+    wgpu::RenderPipeline CreatePipeline(wgpu::TextureFormat format,
+                                        bool enableDepthWrite,
+                                        bool enableStencilWrite) {
+        // Create a NoOp pipeline
+        utils::ComboRenderPipelineDescriptor pipelineDescriptor;
+        pipelineDescriptor.vertex.module = utils::CreateShaderModule(device, R"(
                 @stage(vertex) fn main() -> @builtin(position) vec4<f32> {
                     return vec4<f32>();
                 })");
-            pipelineDescriptor.cFragment.module = utils::CreateShaderModule(device, R"(
+        pipelineDescriptor.cFragment.module = utils::CreateShaderModule(device, R"(
                 @stage(fragment) fn main() {
                 })");
-            pipelineDescriptor.cFragment.targets = nullptr;
-            pipelineDescriptor.cFragment.targetCount = 0;
+        pipelineDescriptor.cFragment.targets = nullptr;
+        pipelineDescriptor.cFragment.targetCount = 0;
 
-            // Enable depth/stencil write if needed
-            wgpu::DepthStencilState* depthStencil = pipelineDescriptor.EnableDepthStencil(format);
-            if (enableDepthWrite) {
-                depthStencil->depthWriteEnabled = true;
-            }
-            if (enableStencilWrite) {
-                depthStencil->stencilFront.failOp = wgpu::StencilOperation::Replace;
-            }
-            return device.CreateRenderPipeline(&pipelineDescriptor);
+        // Enable depth/stencil write if needed
+        wgpu::DepthStencilState* depthStencil = pipelineDescriptor.EnableDepthStencil(format);
+        if (enableDepthWrite) {
+            depthStencil->depthWriteEnabled = true;
+        }
+        if (enableStencilWrite) {
+            depthStencil->stencilFront.failOp = wgpu::StencilOperation::Replace;
+        }
+        return device.CreateRenderPipeline(&pipelineDescriptor);
+    }
+
+    utils::ComboRenderPassDescriptor CreateRenderPassDescriptor(wgpu::TextureFormat format,
+                                                                bool depthReadOnly,
+                                                                bool stencilReadOnly) {
+        wgpu::TextureDescriptor textureDescriptor = {};
+        textureDescriptor.size = {kSize, kSize, 1};
+        textureDescriptor.format = format;
+        textureDescriptor.usage = wgpu::TextureUsage::RenderAttachment;
+        wgpu::Texture depthStencilTexture = device.CreateTexture(&textureDescriptor);
+
+        utils::ComboRenderPassDescriptor passDescriptor({}, depthStencilTexture.CreateView());
+        if (depthReadOnly) {
+            passDescriptor.cDepthStencilAttachmentInfo.depthReadOnly = true;
+            passDescriptor.cDepthStencilAttachmentInfo.depthLoadOp = wgpu::LoadOp::Undefined;
+            passDescriptor.cDepthStencilAttachmentInfo.depthStoreOp = wgpu::StoreOp::Undefined;
         }
 
-        utils::ComboRenderPassDescriptor CreateRenderPassDescriptor(wgpu::TextureFormat format,
-                                                                    bool depthReadOnly,
-                                                                    bool stencilReadOnly) {
-            wgpu::TextureDescriptor textureDescriptor = {};
-            textureDescriptor.size = {kSize, kSize, 1};
-            textureDescriptor.format = format;
-            textureDescriptor.usage = wgpu::TextureUsage::RenderAttachment;
-            wgpu::Texture depthStencilTexture = device.CreateTexture(&textureDescriptor);
-
-            utils::ComboRenderPassDescriptor passDescriptor({}, depthStencilTexture.CreateView());
-            if (depthReadOnly) {
-                passDescriptor.cDepthStencilAttachmentInfo.depthReadOnly = true;
-                passDescriptor.cDepthStencilAttachmentInfo.depthLoadOp = wgpu::LoadOp::Undefined;
-                passDescriptor.cDepthStencilAttachmentInfo.depthStoreOp = wgpu::StoreOp::Undefined;
-            }
-
-            if (stencilReadOnly) {
-                passDescriptor.cDepthStencilAttachmentInfo.stencilReadOnly = true;
-                passDescriptor.cDepthStencilAttachmentInfo.stencilLoadOp = wgpu::LoadOp::Undefined;
-                passDescriptor.cDepthStencilAttachmentInfo.stencilStoreOp =
-                    wgpu::StoreOp::Undefined;
-            }
-
-            return passDescriptor;
+        if (stencilReadOnly) {
+            passDescriptor.cDepthStencilAttachmentInfo.stencilReadOnly = true;
+            passDescriptor.cDepthStencilAttachmentInfo.stencilLoadOp = wgpu::LoadOp::Undefined;
+            passDescriptor.cDepthStencilAttachmentInfo.stencilStoreOp = wgpu::StoreOp::Undefined;
         }
-    };
 
-    // Test depthWrite/stencilWrite in DepthStencilState in render pipeline vs
-    // depthReadOnly/stencilReadOnly in DepthStencilAttachment in render pass.
-    TEST_F(RenderPipelineAndPassCompatibilityTests, WriteAndReadOnlyConflictForDepthStencil) {
-        for (bool depthStencilReadOnlyInPass : {true, false}) {
-            for (bool depthWriteInPipeline : {true, false}) {
-                for (bool stencilWriteInPipeline : {true, false}) {
-                    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-                    utils::ComboRenderPassDescriptor passDescriptor = CreateRenderPassDescriptor(
-                        kFormat, depthStencilReadOnlyInPass, depthStencilReadOnlyInPass);
-                    wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&passDescriptor);
-                    wgpu::RenderPipeline pipeline =
-                        CreatePipeline(kFormat, depthWriteInPipeline, stencilWriteInPipeline);
-                    pass.SetPipeline(pipeline);
-                    pass.Draw(3);
-                    pass.End();
-                    if (depthStencilReadOnlyInPass &&
-                        (depthWriteInPipeline || stencilWriteInPipeline)) {
-                        ASSERT_DEVICE_ERROR(encoder.Finish());
-                    } else {
-                        encoder.Finish();
-                    }
+        return passDescriptor;
+    }
+};
+
+// Test depthWrite/stencilWrite in DepthStencilState in render pipeline vs
+// depthReadOnly/stencilReadOnly in DepthStencilAttachment in render pass.
+TEST_F(RenderPipelineAndPassCompatibilityTests, WriteAndReadOnlyConflictForDepthStencil) {
+    for (bool depthStencilReadOnlyInPass : {true, false}) {
+        for (bool depthWriteInPipeline : {true, false}) {
+            for (bool stencilWriteInPipeline : {true, false}) {
+                wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+                utils::ComboRenderPassDescriptor passDescriptor = CreateRenderPassDescriptor(
+                    kFormat, depthStencilReadOnlyInPass, depthStencilReadOnlyInPass);
+                wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&passDescriptor);
+                wgpu::RenderPipeline pipeline =
+                    CreatePipeline(kFormat, depthWriteInPipeline, stencilWriteInPipeline);
+                pass.SetPipeline(pipeline);
+                pass.Draw(3);
+                pass.End();
+                if (depthStencilReadOnlyInPass &&
+                    (depthWriteInPipeline || stencilWriteInPipeline)) {
+                    ASSERT_DEVICE_ERROR(encoder.Finish());
+                } else {
+                    encoder.Finish();
                 }
             }
         }
     }
+}
 
-    // Test depthWrite/stencilWrite in DepthStencilState in render pipeline vs
-    // depthReadOnly/stencilReadOnly in RenderBundleEncoderDescriptor in render bundle.
-    TEST_F(RenderPipelineAndPassCompatibilityTests,
-           WriteAndReadOnlyConflictForDepthStencilBetweenPipelineAndBundle) {
+// Test depthWrite/stencilWrite in DepthStencilState in render pipeline vs
+// depthReadOnly/stencilReadOnly in RenderBundleEncoderDescriptor in render bundle.
+TEST_F(RenderPipelineAndPassCompatibilityTests,
+       WriteAndReadOnlyConflictForDepthStencilBetweenPipelineAndBundle) {
+    for (bool depthStencilReadOnlyInBundle : {true, false}) {
+        utils::ComboRenderBundleEncoderDescriptor desc = {};
+        desc.depthStencilFormat = kFormat;
+        desc.depthReadOnly = depthStencilReadOnlyInBundle;
+        desc.stencilReadOnly = depthStencilReadOnlyInBundle;
+
+        for (bool depthWriteInPipeline : {true, false}) {
+            for (bool stencilWriteInPipeline : {true, false}) {
+                wgpu::RenderBundleEncoder renderBundleEncoder =
+                    device.CreateRenderBundleEncoder(&desc);
+                wgpu::RenderPipeline pipeline =
+                    CreatePipeline(kFormat, depthWriteInPipeline, stencilWriteInPipeline);
+                renderBundleEncoder.SetPipeline(pipeline);
+                renderBundleEncoder.Draw(3);
+                if (depthStencilReadOnlyInBundle &&
+                    (depthWriteInPipeline || stencilWriteInPipeline)) {
+                    ASSERT_DEVICE_ERROR(renderBundleEncoder.Finish());
+                } else {
+                    renderBundleEncoder.Finish();
+                }
+            }
+        }
+    }
+}
+
+// Test depthReadOnly/stencilReadOnly in RenderBundleEncoderDescriptor in render bundle vs
+// depthReadOnly/stencilReadOnly in DepthStencilAttachment in render pass.
+TEST_F(RenderPipelineAndPassCompatibilityTests,
+       WriteAndReadOnlyConflictForDepthStencilBetweenBundleAndPass) {
+    for (bool depthStencilReadOnlyInPass : {true, false}) {
         for (bool depthStencilReadOnlyInBundle : {true, false}) {
-            utils::ComboRenderBundleEncoderDescriptor desc = {};
-            desc.depthStencilFormat = kFormat;
-            desc.depthReadOnly = depthStencilReadOnlyInBundle;
-            desc.stencilReadOnly = depthStencilReadOnlyInBundle;
-
-            for (bool depthWriteInPipeline : {true, false}) {
-                for (bool stencilWriteInPipeline : {true, false}) {
-                    wgpu::RenderBundleEncoder renderBundleEncoder =
-                        device.CreateRenderBundleEncoder(&desc);
-                    wgpu::RenderPipeline pipeline =
-                        CreatePipeline(kFormat, depthWriteInPipeline, stencilWriteInPipeline);
+            for (bool emptyBundle : {true, false}) {
+                // Create render bundle, with or without a pipeline
+                utils::ComboRenderBundleEncoderDescriptor desc = {};
+                desc.depthStencilFormat = kFormat;
+                desc.depthReadOnly = depthStencilReadOnlyInBundle;
+                desc.stencilReadOnly = depthStencilReadOnlyInBundle;
+                wgpu::RenderBundleEncoder renderBundleEncoder =
+                    device.CreateRenderBundleEncoder(&desc);
+                if (!emptyBundle) {
+                    wgpu::RenderPipeline pipeline = CreatePipeline(
+                        kFormat, !depthStencilReadOnlyInBundle, !depthStencilReadOnlyInBundle);
                     renderBundleEncoder.SetPipeline(pipeline);
                     renderBundleEncoder.Draw(3);
-                    if (depthStencilReadOnlyInBundle &&
-                        (depthWriteInPipeline || stencilWriteInPipeline)) {
-                        ASSERT_DEVICE_ERROR(renderBundleEncoder.Finish());
-                    } else {
-                        renderBundleEncoder.Finish();
-                    }
+                }
+                wgpu::RenderBundle bundle = renderBundleEncoder.Finish();
+
+                // Create render pass and call ExecuteBundles()
+                wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+                utils::ComboRenderPassDescriptor passDescriptor = CreateRenderPassDescriptor(
+                    kFormat, depthStencilReadOnlyInPass, depthStencilReadOnlyInPass);
+                wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&passDescriptor);
+                pass.ExecuteBundles(1, &bundle);
+                pass.End();
+                if (!depthStencilReadOnlyInPass || depthStencilReadOnlyInBundle) {
+                    encoder.Finish();
+                } else {
+                    ASSERT_DEVICE_ERROR(encoder.Finish());
                 }
             }
         }
     }
+}
 
-    // Test depthReadOnly/stencilReadOnly in RenderBundleEncoderDescriptor in render bundle vs
-    // depthReadOnly/stencilReadOnly in DepthStencilAttachment in render pass.
-    TEST_F(RenderPipelineAndPassCompatibilityTests,
-           WriteAndReadOnlyConflictForDepthStencilBetweenBundleAndPass) {
-        for (bool depthStencilReadOnlyInPass : {true, false}) {
-            for (bool depthStencilReadOnlyInBundle : {true, false}) {
-                for (bool emptyBundle : {true, false}) {
-                    // Create render bundle, with or without a pipeline
-                    utils::ComboRenderBundleEncoderDescriptor desc = {};
-                    desc.depthStencilFormat = kFormat;
-                    desc.depthReadOnly = depthStencilReadOnlyInBundle;
-                    desc.stencilReadOnly = depthStencilReadOnlyInBundle;
-                    wgpu::RenderBundleEncoder renderBundleEncoder =
-                        device.CreateRenderBundleEncoder(&desc);
-                    if (!emptyBundle) {
-                        wgpu::RenderPipeline pipeline = CreatePipeline(
-                            kFormat, !depthStencilReadOnlyInBundle, !depthStencilReadOnlyInBundle);
-                        renderBundleEncoder.SetPipeline(pipeline);
-                        renderBundleEncoder.Draw(3);
-                    }
-                    wgpu::RenderBundle bundle = renderBundleEncoder.Finish();
-
-                    // Create render pass and call ExecuteBundles()
-                    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-                    utils::ComboRenderPassDescriptor passDescriptor = CreateRenderPassDescriptor(
-                        kFormat, depthStencilReadOnlyInPass, depthStencilReadOnlyInPass);
-                    wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&passDescriptor);
-                    pass.ExecuteBundles(1, &bundle);
-                    pass.End();
-                    if (!depthStencilReadOnlyInPass || depthStencilReadOnlyInBundle) {
-                        encoder.Finish();
-                    } else {
-                        ASSERT_DEVICE_ERROR(encoder.Finish());
-                    }
-                }
-            }
-        }
-    }
-
-    // TODO(dawn:485): add more tests. For example:
-    //   - depth/stencil attachment should be designated if depth/stencil test is enabled.
-    //   - pipeline and pass compatibility tests for color attachment(s).
-    //   - pipeline and pass compatibility tests for compute.
+// TODO(dawn:485): add more tests. For example:
+//   - depth/stencil attachment should be designated if depth/stencil test is enabled.
+//   - pipeline and pass compatibility tests for color attachment(s).
+//   - pipeline and pass compatibility tests for compute.
 
 }  // anonymous namespace
diff --git a/src/dawn/tests/unittests/validation/QueueSubmitValidationTests.cpp b/src/dawn/tests/unittests/validation/QueueSubmitValidationTests.cpp
index 903f6af..b099e79 100644
--- a/src/dawn/tests/unittests/validation/QueueSubmitValidationTests.cpp
+++ b/src/dawn/tests/unittests/validation/QueueSubmitValidationTests.cpp
@@ -19,347 +19,347 @@
 
 namespace {
 
-    class QueueSubmitValidationTest : public ValidationTest {};
+class QueueSubmitValidationTest : public ValidationTest {};
 
-    // Test submitting with a mapped buffer is disallowed
-    TEST_F(QueueSubmitValidationTest, SubmitWithMappedBuffer) {
-        // Create a map-write buffer.
-        const uint64_t kBufferSize = 4;
-        wgpu::BufferDescriptor descriptor;
-        descriptor.usage = wgpu::BufferUsage::MapWrite | wgpu::BufferUsage::CopySrc;
-        descriptor.size = kBufferSize;
-        wgpu::Buffer buffer = device.CreateBuffer(&descriptor);
+// Test submitting with a mapped buffer is disallowed
+TEST_F(QueueSubmitValidationTest, SubmitWithMappedBuffer) {
+    // Create a map-write buffer.
+    const uint64_t kBufferSize = 4;
+    wgpu::BufferDescriptor descriptor;
+    descriptor.usage = wgpu::BufferUsage::MapWrite | wgpu::BufferUsage::CopySrc;
+    descriptor.size = kBufferSize;
+    wgpu::Buffer buffer = device.CreateBuffer(&descriptor);
 
-        // Create a fake copy destination buffer
-        descriptor.usage = wgpu::BufferUsage::CopyDst;
-        wgpu::Buffer targetBuffer = device.CreateBuffer(&descriptor);
+    // Create a fake copy destination buffer
+    descriptor.usage = wgpu::BufferUsage::CopyDst;
+    wgpu::Buffer targetBuffer = device.CreateBuffer(&descriptor);
 
-        // Create a command buffer that reads from the mappable buffer.
-        wgpu::CommandBuffer commands;
-        {
-            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-            encoder.CopyBufferToBuffer(buffer, 0, targetBuffer, 0, kBufferSize);
-            commands = encoder.Finish();
-        }
-
-        wgpu::Queue queue = device.GetQueue();
-
-        // Submitting when the buffer has never been mapped should succeed
-        queue.Submit(1, &commands);
-
-        {
-            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-            encoder.CopyBufferToBuffer(buffer, 0, targetBuffer, 0, kBufferSize);
-            commands = encoder.Finish();
-        }
-
-        // Map the buffer, submitting when the buffer is mapped should fail
-        buffer.MapAsync(wgpu::MapMode::Write, 0, kBufferSize, nullptr, nullptr);
-
-        // Try submitting before the callback is fired.
-        ASSERT_DEVICE_ERROR(queue.Submit(1, &commands));
-
-        WaitForAllOperations(device);
-
-        {
-            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-            encoder.CopyBufferToBuffer(buffer, 0, targetBuffer, 0, kBufferSize);
-            commands = encoder.Finish();
-        }
-
-        // Try submitting after the callback is fired.
-        ASSERT_DEVICE_ERROR(queue.Submit(1, &commands));
-
-        {
-            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-            encoder.CopyBufferToBuffer(buffer, 0, targetBuffer, 0, kBufferSize);
-            commands = encoder.Finish();
-        }
-
-        // Unmap the buffer, queue submit should succeed
-        buffer.Unmap();
-        queue.Submit(1, &commands);
+    // Create a command buffer that reads from the mappable buffer.
+    wgpu::CommandBuffer commands;
+    {
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        encoder.CopyBufferToBuffer(buffer, 0, targetBuffer, 0, kBufferSize);
+        commands = encoder.Finish();
     }
 
-    // Test it is invalid to submit a command buffer twice
-    TEST_F(QueueSubmitValidationTest, CommandBufferSubmittedTwice) {
-        wgpu::CommandBuffer commandBuffer = device.CreateCommandEncoder().Finish();
-        wgpu::Queue queue = device.GetQueue();
+    wgpu::Queue queue = device.GetQueue();
 
-        // Should succeed
-        queue.Submit(1, &commandBuffer);
+    // Submitting when the buffer has never been mapped should succeed
+    queue.Submit(1, &commands);
 
-        // Should fail because command buffer was already submitted
-        ASSERT_DEVICE_ERROR(queue.Submit(1, &commandBuffer));
+    {
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        encoder.CopyBufferToBuffer(buffer, 0, targetBuffer, 0, kBufferSize);
+        commands = encoder.Finish();
     }
 
-    // Test resubmitting failed command buffers
-    TEST_F(QueueSubmitValidationTest, CommandBufferSubmittedFailed) {
-        // Create a map-write buffer
-        const uint64_t kBufferSize = 4;
-        wgpu::BufferDescriptor descriptor;
-        descriptor.usage = wgpu::BufferUsage::MapWrite | wgpu::BufferUsage::CopySrc;
-        descriptor.size = kBufferSize;
-        wgpu::Buffer buffer = device.CreateBuffer(&descriptor);
+    // Map the buffer, submitting when the buffer is mapped should fail
+    buffer.MapAsync(wgpu::MapMode::Write, 0, kBufferSize, nullptr, nullptr);
 
-        // Create a destination buffer for the b2b copy
-        descriptor.usage = wgpu::BufferUsage::CopyDst;
-        descriptor.size = kBufferSize;
-        wgpu::Buffer targetBuffer = device.CreateBuffer(&descriptor);
+    // Try submitting before the callback is fired.
+    ASSERT_DEVICE_ERROR(queue.Submit(1, &commands));
 
-        // Create a command buffer that reads from the mappable buffer
-        wgpu::CommandBuffer commands;
-        {
-            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-            encoder.CopyBufferToBuffer(buffer, 0, targetBuffer, 0, kBufferSize);
-            commands = encoder.Finish();
-        }
+    WaitForAllOperations(device);
 
-        wgpu::Queue queue = device.GetQueue();
-
-        // Map the source buffer to force a failure
-        buffer.MapAsync(wgpu::MapMode::Write, 0, kBufferSize, nullptr, nullptr);
-
-        // Submitting a command buffer with a mapped buffer should fail
-        ASSERT_DEVICE_ERROR(queue.Submit(1, &commands));
-
-        // Unmap buffer to fix the failure
-        buffer.Unmap();
-
-        // Resubmitting any command buffer, even if the problem was fixed, should fail
-        ASSERT_DEVICE_ERROR(queue.Submit(1, &commands));
+    {
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        encoder.CopyBufferToBuffer(buffer, 0, targetBuffer, 0, kBufferSize);
+        commands = encoder.Finish();
     }
 
-    // Test that submitting in a buffer mapping callback doesn't cause re-entrance problems.
-    TEST_F(QueueSubmitValidationTest, SubmitInBufferMapCallback) {
-        // Create a buffer for mapping, to run our callback.
-        wgpu::BufferDescriptor descriptor;
-        descriptor.size = 4;
-        descriptor.usage = wgpu::BufferUsage::MapWrite;
-        wgpu::Buffer buffer = device.CreateBuffer(&descriptor);
+    // Try submitting after the callback is fired.
+    ASSERT_DEVICE_ERROR(queue.Submit(1, &commands));
 
-        struct CallbackData {
-            wgpu::Device device;
-            wgpu::Buffer buffer;
-        } callbackData = {device, buffer};
-
-        const auto callback = [](WGPUBufferMapAsyncStatus status, void* userdata) {
-            CallbackData* data = reinterpret_cast<CallbackData*>(userdata);
-
-            data->buffer.Unmap();
-
-            wgpu::Queue queue = data->device.GetQueue();
-            queue.Submit(0, nullptr);
-        };
-
-        buffer.MapAsync(wgpu::MapMode::Write, 0, descriptor.size, callback, &callbackData);
-
-        WaitForAllOperations(device);
+    {
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        encoder.CopyBufferToBuffer(buffer, 0, targetBuffer, 0, kBufferSize);
+        commands = encoder.Finish();
     }
 
-    // Test that submitting in a render pipeline creation callback doesn't cause re-entrance
-    // problems.
-    TEST_F(QueueSubmitValidationTest, SubmitInCreateRenderPipelineAsyncCallback) {
-        struct CallbackData {
-            wgpu::Device device;
-        } callbackData = {device};
+    // Unmap the buffer, queue submit should succeed
+    buffer.Unmap();
+    queue.Submit(1, &commands);
+}
 
-        const auto callback = [](WGPUCreatePipelineAsyncStatus status, WGPURenderPipeline pipeline,
-                                 char const* message, void* userdata) {
-            CallbackData* data = reinterpret_cast<CallbackData*>(userdata);
+// Test it is invalid to submit a command buffer twice
+TEST_F(QueueSubmitValidationTest, CommandBufferSubmittedTwice) {
+    wgpu::CommandBuffer commandBuffer = device.CreateCommandEncoder().Finish();
+    wgpu::Queue queue = device.GetQueue();
 
-            wgpuRenderPipelineRelease(pipeline);
+    // Should succeed
+    queue.Submit(1, &commandBuffer);
 
-            wgpu::Queue queue = data->device.GetQueue();
-            queue.Submit(0, nullptr);
-        };
+    // Should fail because command buffer was already submitted
+    ASSERT_DEVICE_ERROR(queue.Submit(1, &commandBuffer));
+}
 
-        wgpu::ShaderModule vsModule = utils::CreateShaderModule(device, R"(
+// Test resubmitting failed command buffers
+TEST_F(QueueSubmitValidationTest, CommandBufferSubmittedFailed) {
+    // Create a map-write buffer
+    const uint64_t kBufferSize = 4;
+    wgpu::BufferDescriptor descriptor;
+    descriptor.usage = wgpu::BufferUsage::MapWrite | wgpu::BufferUsage::CopySrc;
+    descriptor.size = kBufferSize;
+    wgpu::Buffer buffer = device.CreateBuffer(&descriptor);
+
+    // Create a destination buffer for the b2b copy
+    descriptor.usage = wgpu::BufferUsage::CopyDst;
+    descriptor.size = kBufferSize;
+    wgpu::Buffer targetBuffer = device.CreateBuffer(&descriptor);
+
+    // Create a command buffer that reads from the mappable buffer
+    wgpu::CommandBuffer commands;
+    {
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        encoder.CopyBufferToBuffer(buffer, 0, targetBuffer, 0, kBufferSize);
+        commands = encoder.Finish();
+    }
+
+    wgpu::Queue queue = device.GetQueue();
+
+    // Map the source buffer to force a failure
+    buffer.MapAsync(wgpu::MapMode::Write, 0, kBufferSize, nullptr, nullptr);
+
+    // Submitting a command buffer with a mapped buffer should fail
+    ASSERT_DEVICE_ERROR(queue.Submit(1, &commands));
+
+    // Unmap buffer to fix the failure
+    buffer.Unmap();
+
+    // Resubmitting any command buffer, even if the problem was fixed, should fail
+    ASSERT_DEVICE_ERROR(queue.Submit(1, &commands));
+}
+
+// Test that submitting in a buffer mapping callback doesn't cause re-entrance problems.
+TEST_F(QueueSubmitValidationTest, SubmitInBufferMapCallback) {
+    // Create a buffer for mapping, to run our callback.
+    wgpu::BufferDescriptor descriptor;
+    descriptor.size = 4;
+    descriptor.usage = wgpu::BufferUsage::MapWrite;
+    wgpu::Buffer buffer = device.CreateBuffer(&descriptor);
+
+    struct CallbackData {
+        wgpu::Device device;
+        wgpu::Buffer buffer;
+    } callbackData = {device, buffer};
+
+    const auto callback = [](WGPUBufferMapAsyncStatus status, void* userdata) {
+        CallbackData* data = reinterpret_cast<CallbackData*>(userdata);
+
+        data->buffer.Unmap();
+
+        wgpu::Queue queue = data->device.GetQueue();
+        queue.Submit(0, nullptr);
+    };
+
+    buffer.MapAsync(wgpu::MapMode::Write, 0, descriptor.size, callback, &callbackData);
+
+    WaitForAllOperations(device);
+}
+
+// Test that submitting in a render pipeline creation callback doesn't cause re-entrance
+// problems.
+TEST_F(QueueSubmitValidationTest, SubmitInCreateRenderPipelineAsyncCallback) {
+    struct CallbackData {
+        wgpu::Device device;
+    } callbackData = {device};
+
+    const auto callback = [](WGPUCreatePipelineAsyncStatus status, WGPURenderPipeline pipeline,
+                             char const* message, void* userdata) {
+        CallbackData* data = reinterpret_cast<CallbackData*>(userdata);
+
+        wgpuRenderPipelineRelease(pipeline);
+
+        wgpu::Queue queue = data->device.GetQueue();
+        queue.Submit(0, nullptr);
+    };
+
+    wgpu::ShaderModule vsModule = utils::CreateShaderModule(device, R"(
             @stage(vertex) fn main() -> @builtin(position) vec4<f32> {
                 return vec4<f32>(0.0, 0.0, 0.0, 1.0);
             })");
 
-        wgpu::ShaderModule fsModule = utils::CreateShaderModule(device, R"(
+    wgpu::ShaderModule fsModule = utils::CreateShaderModule(device, R"(
             @stage(fragment) fn main() -> @location(0) vec4<f32> {
                 return vec4<f32>(0.0, 1.0, 0.0, 1.0);
             })");
 
-        utils::ComboRenderPipelineDescriptor descriptor;
-        descriptor.vertex.module = vsModule;
-        descriptor.cFragment.module = fsModule;
-        device.CreateRenderPipelineAsync(&descriptor, callback, &callbackData);
+    utils::ComboRenderPipelineDescriptor descriptor;
+    descriptor.vertex.module = vsModule;
+    descriptor.cFragment.module = fsModule;
+    device.CreateRenderPipelineAsync(&descriptor, callback, &callbackData);
 
-        WaitForAllOperations(device);
-    }
+    WaitForAllOperations(device);
+}
 
-    // Test that submitting in a compute pipeline creation callback doesn't cause re-entrance
-    // problems.
-    TEST_F(QueueSubmitValidationTest, SubmitInCreateComputePipelineAsyncCallback) {
-        struct CallbackData {
-            wgpu::Device device;
-        } callbackData = {device};
+// Test that submitting in a compute pipeline creation callback doesn't cause re-entrance
+// problems.
+TEST_F(QueueSubmitValidationTest, SubmitInCreateComputePipelineAsyncCallback) {
+    struct CallbackData {
+        wgpu::Device device;
+    } callbackData = {device};
 
-        const auto callback = [](WGPUCreatePipelineAsyncStatus status, WGPUComputePipeline pipeline,
-                                 char const* message, void* userdata) {
-            CallbackData* data = reinterpret_cast<CallbackData*>(userdata);
+    const auto callback = [](WGPUCreatePipelineAsyncStatus status, WGPUComputePipeline pipeline,
+                             char const* message, void* userdata) {
+        CallbackData* data = reinterpret_cast<CallbackData*>(userdata);
 
-            wgpuComputePipelineRelease(pipeline);
+        wgpuComputePipelineRelease(pipeline);
 
-            wgpu::Queue queue = data->device.GetQueue();
-            queue.Submit(0, nullptr);
-        };
+        wgpu::Queue queue = data->device.GetQueue();
+        queue.Submit(0, nullptr);
+    };
 
-        wgpu::ComputePipelineDescriptor descriptor;
-        descriptor.compute.module = utils::CreateShaderModule(device, R"(
+    wgpu::ComputePipelineDescriptor descriptor;
+    descriptor.compute.module = utils::CreateShaderModule(device, R"(
             @stage(compute) @workgroup_size(1) fn main() {
             })");
-        descriptor.compute.entryPoint = "main";
-        device.CreateComputePipelineAsync(&descriptor, callback, &callbackData);
+    descriptor.compute.entryPoint = "main";
+    device.CreateComputePipelineAsync(&descriptor, callback, &callbackData);
 
-        WaitForAllOperations(device);
-    }
+    WaitForAllOperations(device);
+}
 
-    // Test that buffers in unused compute pass bindgroups are still checked for in
-    // Queue::Submit validation.
-    TEST_F(QueueSubmitValidationTest, SubmitWithUnusedComputeBuffer) {
-        wgpu::Queue queue = device.GetQueue();
+// Test that buffers in unused compute pass bindgroups are still checked for in
+// Queue::Submit validation.
+TEST_F(QueueSubmitValidationTest, SubmitWithUnusedComputeBuffer) {
+    wgpu::Queue queue = device.GetQueue();
 
-        wgpu::BindGroupLayout emptyBGL = utils::MakeBindGroupLayout(device, {});
-        wgpu::BindGroup emptyBG = utils::MakeBindGroup(device, emptyBGL, {});
+    wgpu::BindGroupLayout emptyBGL = utils::MakeBindGroupLayout(device, {});
+    wgpu::BindGroup emptyBG = utils::MakeBindGroup(device, emptyBGL, {});
 
-        wgpu::BindGroupLayout testBGL = utils::MakeBindGroupLayout(
-            device, {{0, wgpu::ShaderStage::Compute, wgpu::BufferBindingType::Storage}});
+    wgpu::BindGroupLayout testBGL = utils::MakeBindGroupLayout(
+        device, {{0, wgpu::ShaderStage::Compute, wgpu::BufferBindingType::Storage}});
 
-        // In this test we check that BindGroup 1 is checked, the texture test will check
-        // BindGroup 2. This is to provide coverage of for loops in validation code.
-        wgpu::ComputePipelineDescriptor cpDesc;
-        cpDesc.layout = utils::MakePipelineLayout(device, {emptyBGL, testBGL});
-        cpDesc.compute.entryPoint = "main";
-        cpDesc.compute.module =
-            utils::CreateShaderModule(device, "@stage(compute) @workgroup_size(1) fn main() {}");
-        wgpu::ComputePipeline pipeline = device.CreateComputePipeline(&cpDesc);
+    // In this test we check that BindGroup 1 is checked, the texture test will check
+    // BindGroup 2. This is to provide coverage of for loops in validation code.
+    wgpu::ComputePipelineDescriptor cpDesc;
+    cpDesc.layout = utils::MakePipelineLayout(device, {emptyBGL, testBGL});
+    cpDesc.compute.entryPoint = "main";
+    cpDesc.compute.module =
+        utils::CreateShaderModule(device, "@stage(compute) @workgroup_size(1) fn main() {}");
+    wgpu::ComputePipeline pipeline = device.CreateComputePipeline(&cpDesc);
 
-        wgpu::BufferDescriptor bufDesc;
-        bufDesc.size = 4;
-        bufDesc.usage = wgpu::BufferUsage::Storage;
+    wgpu::BufferDescriptor bufDesc;
+    bufDesc.size = 4;
+    bufDesc.usage = wgpu::BufferUsage::Storage;
 
-        // Test that completely unused bindgroups still have their buffers checked.
-        for (bool destroy : {true, false}) {
-            wgpu::Buffer unusedBuffer = device.CreateBuffer(&bufDesc);
-            wgpu::BindGroup unusedBG = utils::MakeBindGroup(device, testBGL, {{0, unusedBuffer}});
+    // Test that completely unused bindgroups still have their buffers checked.
+    for (bool destroy : {true, false}) {
+        wgpu::Buffer unusedBuffer = device.CreateBuffer(&bufDesc);
+        wgpu::BindGroup unusedBG = utils::MakeBindGroup(device, testBGL, {{0, unusedBuffer}});
 
-            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-            wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
-            pass.SetBindGroup(1, unusedBG);
-            pass.End();
-            wgpu::CommandBuffer commands = encoder.Finish();
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
+        pass.SetBindGroup(1, unusedBG);
+        pass.End();
+        wgpu::CommandBuffer commands = encoder.Finish();
 
-            if (destroy) {
-                unusedBuffer.Destroy();
-                ASSERT_DEVICE_ERROR(queue.Submit(1, &commands));
-            } else {
-                queue.Submit(1, &commands);
-            }
-        }
-
-        // Test that unused bindgroups because they were replaced still have their buffers checked.
-        for (bool destroy : {true, false}) {
-            wgpu::Buffer unusedBuffer = device.CreateBuffer(&bufDesc);
-            wgpu::BindGroup unusedBG = utils::MakeBindGroup(device, testBGL, {{0, unusedBuffer}});
-
-            wgpu::Buffer usedBuffer = device.CreateBuffer(&bufDesc);
-            wgpu::BindGroup usedBG = utils::MakeBindGroup(device, testBGL, {{0, unusedBuffer}});
-
-            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-            wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
-            pass.SetBindGroup(0, emptyBG);
-            pass.SetBindGroup(1, unusedBG);
-            pass.SetBindGroup(1, usedBG);
-            pass.SetPipeline(pipeline);
-            pass.DispatchWorkgroups(1);
-            pass.End();
-            wgpu::CommandBuffer commands = encoder.Finish();
-
-            if (destroy) {
-                unusedBuffer.Destroy();
-                ASSERT_DEVICE_ERROR(queue.Submit(1, &commands));
-            } else {
-                queue.Submit(1, &commands);
-            }
+        if (destroy) {
+            unusedBuffer.Destroy();
+            ASSERT_DEVICE_ERROR(queue.Submit(1, &commands));
+        } else {
+            queue.Submit(1, &commands);
         }
     }
 
-    // Test that textures in unused compute pass bindgroups are still checked for in
-    // Queue::Submit validation.
-    TEST_F(QueueSubmitValidationTest, SubmitWithUnusedComputeTextures) {
-        wgpu::Queue queue = device.GetQueue();
+    // Test that unused bindgroups because they were replaced still have their buffers checked.
+    for (bool destroy : {true, false}) {
+        wgpu::Buffer unusedBuffer = device.CreateBuffer(&bufDesc);
+        wgpu::BindGroup unusedBG = utils::MakeBindGroup(device, testBGL, {{0, unusedBuffer}});
 
-        wgpu::BindGroupLayout emptyBGL = utils::MakeBindGroupLayout(device, {});
-        wgpu::BindGroup emptyBG = utils::MakeBindGroup(device, emptyBGL, {});
+        wgpu::Buffer usedBuffer = device.CreateBuffer(&bufDesc);
+        wgpu::BindGroup usedBG = utils::MakeBindGroup(device, testBGL, {{0, unusedBuffer}});
 
-        wgpu::BindGroupLayout testBGL = utils::MakeBindGroupLayout(
-            device, {{0, wgpu::ShaderStage::Compute, wgpu::TextureSampleType::Float}});
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
+        pass.SetBindGroup(0, emptyBG);
+        pass.SetBindGroup(1, unusedBG);
+        pass.SetBindGroup(1, usedBG);
+        pass.SetPipeline(pipeline);
+        pass.DispatchWorkgroups(1);
+        pass.End();
+        wgpu::CommandBuffer commands = encoder.Finish();
 
-        wgpu::ComputePipelineDescriptor cpDesc;
-        cpDesc.layout = utils::MakePipelineLayout(device, {emptyBGL, emptyBGL, testBGL});
-        cpDesc.compute.entryPoint = "main";
-        cpDesc.compute.module =
-            utils::CreateShaderModule(device, "@stage(compute) @workgroup_size(1) fn main() {}");
-        wgpu::ComputePipeline pipeline = device.CreateComputePipeline(&cpDesc);
-
-        wgpu::TextureDescriptor texDesc;
-        texDesc.size = {1, 1, 1};
-        texDesc.usage = wgpu::TextureUsage::TextureBinding;
-        texDesc.format = wgpu::TextureFormat::RGBA8Unorm;
-
-        // Test that completely unused bindgroups still have their buffers checked.
-        for (bool destroy : {true, false}) {
-            wgpu::Texture unusedTexture = device.CreateTexture(&texDesc);
-            wgpu::BindGroup unusedBG =
-                utils::MakeBindGroup(device, testBGL, {{0, unusedTexture.CreateView()}});
-
-            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-            wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
-            pass.SetBindGroup(2, unusedBG);
-            pass.End();
-            wgpu::CommandBuffer commands = encoder.Finish();
-
-            if (destroy) {
-                unusedTexture.Destroy();
-                ASSERT_DEVICE_ERROR(queue.Submit(1, &commands));
-            } else {
-                queue.Submit(1, &commands);
-            }
-        }
-
-        // Test that unused bindgroups because they were replaced still have their buffers checked.
-        for (bool destroy : {true, false}) {
-            wgpu::Texture unusedTexture = device.CreateTexture(&texDesc);
-            wgpu::BindGroup unusedBG =
-                utils::MakeBindGroup(device, testBGL, {{0, unusedTexture.CreateView()}});
-
-            wgpu::Texture usedTexture = device.CreateTexture(&texDesc);
-            wgpu::BindGroup usedBG =
-                utils::MakeBindGroup(device, testBGL, {{0, unusedTexture.CreateView()}});
-
-            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-            wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
-            pass.SetBindGroup(0, emptyBG);
-            pass.SetBindGroup(1, emptyBG);
-            pass.SetBindGroup(2, unusedBG);
-            pass.SetBindGroup(2, usedBG);
-            pass.SetPipeline(pipeline);
-            pass.DispatchWorkgroups(1);
-            pass.End();
-            wgpu::CommandBuffer commands = encoder.Finish();
-
-            if (destroy) {
-                unusedTexture.Destroy();
-                ASSERT_DEVICE_ERROR(queue.Submit(1, &commands));
-            } else {
-                queue.Submit(1, &commands);
-            }
+        if (destroy) {
+            unusedBuffer.Destroy();
+            ASSERT_DEVICE_ERROR(queue.Submit(1, &commands));
+        } else {
+            queue.Submit(1, &commands);
         }
     }
+}
+
+// Test that textures in unused compute pass bindgroups are still checked for in
+// Queue::Submit validation.
+TEST_F(QueueSubmitValidationTest, SubmitWithUnusedComputeTextures) {
+    wgpu::Queue queue = device.GetQueue();
+
+    wgpu::BindGroupLayout emptyBGL = utils::MakeBindGroupLayout(device, {});
+    wgpu::BindGroup emptyBG = utils::MakeBindGroup(device, emptyBGL, {});
+
+    wgpu::BindGroupLayout testBGL = utils::MakeBindGroupLayout(
+        device, {{0, wgpu::ShaderStage::Compute, wgpu::TextureSampleType::Float}});
+
+    wgpu::ComputePipelineDescriptor cpDesc;
+    cpDesc.layout = utils::MakePipelineLayout(device, {emptyBGL, emptyBGL, testBGL});
+    cpDesc.compute.entryPoint = "main";
+    cpDesc.compute.module =
+        utils::CreateShaderModule(device, "@stage(compute) @workgroup_size(1) fn main() {}");
+    wgpu::ComputePipeline pipeline = device.CreateComputePipeline(&cpDesc);
+
+    wgpu::TextureDescriptor texDesc;
+    texDesc.size = {1, 1, 1};
+    texDesc.usage = wgpu::TextureUsage::TextureBinding;
+    texDesc.format = wgpu::TextureFormat::RGBA8Unorm;
+
+    // Test that completely unused bindgroups still have their buffers checked.
+    for (bool destroy : {true, false}) {
+        wgpu::Texture unusedTexture = device.CreateTexture(&texDesc);
+        wgpu::BindGroup unusedBG =
+            utils::MakeBindGroup(device, testBGL, {{0, unusedTexture.CreateView()}});
+
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
+        pass.SetBindGroup(2, unusedBG);
+        pass.End();
+        wgpu::CommandBuffer commands = encoder.Finish();
+
+        if (destroy) {
+            unusedTexture.Destroy();
+            ASSERT_DEVICE_ERROR(queue.Submit(1, &commands));
+        } else {
+            queue.Submit(1, &commands);
+        }
+    }
+
+    // Test that unused bindgroups because they were replaced still have their buffers checked.
+    for (bool destroy : {true, false}) {
+        wgpu::Texture unusedTexture = device.CreateTexture(&texDesc);
+        wgpu::BindGroup unusedBG =
+            utils::MakeBindGroup(device, testBGL, {{0, unusedTexture.CreateView()}});
+
+        wgpu::Texture usedTexture = device.CreateTexture(&texDesc);
+        wgpu::BindGroup usedBG =
+            utils::MakeBindGroup(device, testBGL, {{0, unusedTexture.CreateView()}});
+
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
+        pass.SetBindGroup(0, emptyBG);
+        pass.SetBindGroup(1, emptyBG);
+        pass.SetBindGroup(2, unusedBG);
+        pass.SetBindGroup(2, usedBG);
+        pass.SetPipeline(pipeline);
+        pass.DispatchWorkgroups(1);
+        pass.End();
+        wgpu::CommandBuffer commands = encoder.Finish();
+
+        if (destroy) {
+            unusedTexture.Destroy();
+            ASSERT_DEVICE_ERROR(queue.Submit(1, &commands));
+        } else {
+            queue.Submit(1, &commands);
+        }
+    }
+}
 
 }  // anonymous namespace
diff --git a/src/dawn/tests/unittests/validation/QueueWriteTextureValidationTests.cpp b/src/dawn/tests/unittests/validation/QueueWriteTextureValidationTests.cpp
index 371931f..b5fe723 100644
--- a/src/dawn/tests/unittests/validation/QueueWriteTextureValidationTests.cpp
+++ b/src/dawn/tests/unittests/validation/QueueWriteTextureValidationTests.cpp
@@ -22,789 +22,767 @@
 
 namespace {
 
-    class QueueWriteTextureValidationTest : public ValidationTest {
-      private:
-        void SetUp() override {
-            ValidationTest::SetUp();
-            queue = device.GetQueue();
-        }
-
-      protected:
-        wgpu::Texture Create2DTexture(wgpu::Extent3D size,
-                                      uint32_t mipLevelCount,
-                                      wgpu::TextureFormat format,
-                                      wgpu::TextureUsage usage,
-                                      uint32_t sampleCount = 1) {
-            wgpu::TextureDescriptor descriptor;
-            descriptor.dimension = wgpu::TextureDimension::e2D;
-            descriptor.size.width = size.width;
-            descriptor.size.height = size.height;
-            descriptor.size.depthOrArrayLayers = size.depthOrArrayLayers;
-            descriptor.sampleCount = sampleCount;
-            descriptor.format = format;
-            descriptor.mipLevelCount = mipLevelCount;
-            descriptor.usage = usage;
-            wgpu::Texture tex = device.CreateTexture(&descriptor);
-            return tex;
-        }
-
-        void TestWriteTexture(size_t dataSize,
-                              uint32_t dataOffset,
-                              uint32_t dataBytesPerRow,
-                              uint32_t dataRowsPerImage,
-                              wgpu::Texture texture,
-                              uint32_t texLevel,
-                              wgpu::Origin3D texOrigin,
-                              wgpu::Extent3D size,
-                              wgpu::TextureAspect aspect = wgpu::TextureAspect::All) {
-            std::vector<uint8_t> data(dataSize);
-
-            wgpu::TextureDataLayout textureDataLayout;
-            textureDataLayout.offset = dataOffset;
-            textureDataLayout.bytesPerRow = dataBytesPerRow;
-            textureDataLayout.rowsPerImage = dataRowsPerImage;
-
-            wgpu::ImageCopyTexture imageCopyTexture =
-                utils::CreateImageCopyTexture(texture, texLevel, texOrigin, aspect);
-
-            queue.WriteTexture(&imageCopyTexture, data.data(), dataSize, &textureDataLayout, &size);
-        }
-
-        void TestWriteTextureExactDataSize(uint32_t bytesPerRow,
-                                           uint32_t rowsPerImage,
-                                           wgpu::Texture texture,
-                                           wgpu::TextureFormat textureFormat,
-                                           wgpu::Origin3D origin,
-                                           wgpu::Extent3D extent3D) {
-            // Check the minimal valid dataSize.
-            uint64_t dataSize =
-                utils::RequiredBytesInCopy(bytesPerRow, rowsPerImage, extent3D, textureFormat);
-            TestWriteTexture(dataSize, 0, bytesPerRow, rowsPerImage, texture, 0, origin, extent3D);
-
-            // Check dataSize was indeed minimal.
-            uint64_t invalidSize = dataSize - 1;
-            ASSERT_DEVICE_ERROR(TestWriteTexture(invalidSize, 0, bytesPerRow, rowsPerImage, texture,
-                                                 0, origin, extent3D));
-        }
-
-        wgpu::Queue queue;
-    };
-
-    // Test the success case for WriteTexture
-    TEST_F(QueueWriteTextureValidationTest, Success) {
-        const uint64_t dataSize =
-            utils::RequiredBytesInCopy(256, 0, {4, 4, 1}, wgpu::TextureFormat::RGBA8Unorm);
-        wgpu::Texture destination = Create2DTexture({16, 16, 4}, 5, wgpu::TextureFormat::RGBA8Unorm,
-                                                    wgpu::TextureUsage::CopyDst);
-
-        // Different copies, including some that touch the OOB condition
-        {
-            // Copy 4x4 block in corner of first mip.
-            TestWriteTexture(dataSize, 0, 256, 4, destination, 0, {0, 0, 0}, {4, 4, 1});
-            // Copy 4x4 block in opposite corner of first mip.
-            TestWriteTexture(dataSize, 0, 256, 4, destination, 0, {12, 12, 0}, {4, 4, 1});
-            // Copy 4x4 block in the 4x4 mip.
-            TestWriteTexture(dataSize, 0, 256, 4, destination, 2, {0, 0, 0}, {4, 4, 1});
-            // Copy with a data offset
-            TestWriteTexture(dataSize, dataSize - 4, 256, 1, destination, 0, {0, 0, 0}, {1, 1, 1});
-            TestWriteTexture(dataSize, dataSize - 4, 256, wgpu::kCopyStrideUndefined, destination,
-                             0, {0, 0, 0}, {1, 1, 1});
-        }
-
-        // Copies with a 256-byte aligned bytes per row but unaligned texture region
-        {
-            // Unaligned region
-            TestWriteTexture(dataSize, 0, 256, 4, destination, 0, {0, 0, 0}, {3, 4, 1});
-            // Unaligned region with texture offset
-            TestWriteTexture(dataSize, 0, 256, 3, destination, 0, {5, 7, 0}, {2, 3, 1});
-            // Unaligned region, with data offset
-            TestWriteTexture(dataSize, 31 * 4, 256, 3, destination, 0, {0, 0, 0}, {3, 3, 1});
-        }
-
-        // Empty copies are valid
-        {
-            // An empty copy
-            TestWriteTexture(dataSize, 0, 0, 0, destination, 0, {0, 0, 0}, {0, 0, 1});
-            TestWriteTexture(dataSize, 0, 0, wgpu::kCopyStrideUndefined, destination, 0, {0, 0, 0},
-                             {0, 0, 1});
-            // An empty copy with depth = 0
-            TestWriteTexture(dataSize, 0, 0, 0, destination, 0, {0, 0, 0}, {0, 0, 0});
-            TestWriteTexture(dataSize, 0, 0, wgpu::kCopyStrideUndefined, destination, 0, {0, 0, 0},
-                             {0, 0, 0});
-            // An empty copy touching the end of the data
-            TestWriteTexture(dataSize, dataSize, 0, 0, destination, 0, {0, 0, 0}, {0, 0, 1});
-            TestWriteTexture(dataSize, dataSize, 0, wgpu::kCopyStrideUndefined, destination, 0,
-                             {0, 0, 0}, {0, 0, 1});
-            // An empty copy touching the side of the texture
-            TestWriteTexture(dataSize, 0, 0, 0, destination, 0, {16, 16, 0}, {0, 0, 1});
-            TestWriteTexture(dataSize, 0, 0, wgpu::kCopyStrideUndefined, destination, 0,
-                             {16, 16, 0}, {0, 0, 1});
-            // An empty copy with depth = 1 and bytesPerRow > 0
-            TestWriteTexture(dataSize, 0, 256, 0, destination, 0, {0, 0, 0}, {0, 0, 1});
-            TestWriteTexture(dataSize, 0, 256, wgpu::kCopyStrideUndefined, destination, 0,
-                             {0, 0, 0}, {0, 0, 1});
-            // An empty copy with height > 0, depth = 0, bytesPerRow > 0 and rowsPerImage > 0
-            TestWriteTexture(dataSize, 0, 256, wgpu::kCopyStrideUndefined, destination, 0,
-                             {0, 0, 0}, {0, 1, 0});
-            TestWriteTexture(dataSize, 0, 256, 1, destination, 0, {0, 0, 0}, {0, 1, 0});
-            TestWriteTexture(dataSize, 0, 256, 16, destination, 0, {0, 0, 0}, {0, 1, 0});
-        }
+class QueueWriteTextureValidationTest : public ValidationTest {
+  private:
+    void SetUp() override {
+        ValidationTest::SetUp();
+        queue = device.GetQueue();
     }
 
-    // Test OOB conditions on the data
-    TEST_F(QueueWriteTextureValidationTest, OutOfBoundsOnData) {
-        const uint64_t dataSize =
-            utils::RequiredBytesInCopy(256, 0, {4, 4, 1}, wgpu::TextureFormat::RGBA8Unorm);
-        wgpu::Texture destination = Create2DTexture({16, 16, 1}, 5, wgpu::TextureFormat::RGBA8Unorm,
-                                                    wgpu::TextureUsage::CopyDst);
-
-        // OOB on the data because we copy too many pixels
-        ASSERT_DEVICE_ERROR(
-            TestWriteTexture(dataSize, 0, 256, 5, destination, 0, {0, 0, 0}, {4, 5, 1}));
-
-        // OOB on the data because of the offset
-        ASSERT_DEVICE_ERROR(
-            TestWriteTexture(dataSize, 4, 256, 4, destination, 0, {0, 0, 0}, {4, 4, 1}));
-
-        // OOB on the data because utils::RequiredBytesInCopy overflows
-        ASSERT_DEVICE_ERROR(
-            TestWriteTexture(dataSize, 0, 512, 3, destination, 0, {0, 0, 0}, {4, 3, 1}));
-
-        // Not OOB on the data although bytes per row * height overflows
-        // but utils::RequiredBytesInCopy * depth does not overflow
-        {
-            uint32_t sourceDataSize =
-                utils::RequiredBytesInCopy(256, 0, {7, 3, 1}, wgpu::TextureFormat::RGBA8Unorm);
-            ASSERT_TRUE(256 * 3 > sourceDataSize) << "bytes per row * height should overflow data";
-
-            TestWriteTexture(sourceDataSize, 0, 256, 3, destination, 0, {0, 0, 0}, {7, 3, 1});
-        }
+  protected:
+    wgpu::Texture Create2DTexture(wgpu::Extent3D size,
+                                  uint32_t mipLevelCount,
+                                  wgpu::TextureFormat format,
+                                  wgpu::TextureUsage usage,
+                                  uint32_t sampleCount = 1) {
+        wgpu::TextureDescriptor descriptor;
+        descriptor.dimension = wgpu::TextureDimension::e2D;
+        descriptor.size.width = size.width;
+        descriptor.size.height = size.height;
+        descriptor.size.depthOrArrayLayers = size.depthOrArrayLayers;
+        descriptor.sampleCount = sampleCount;
+        descriptor.format = format;
+        descriptor.mipLevelCount = mipLevelCount;
+        descriptor.usage = usage;
+        wgpu::Texture tex = device.CreateTexture(&descriptor);
+        return tex;
     }
 
-    // Test OOB conditions on the texture
-    TEST_F(QueueWriteTextureValidationTest, OutOfBoundsOnTexture) {
-        const uint64_t dataSize =
-            utils::RequiredBytesInCopy(256, 0, {4, 4, 1}, wgpu::TextureFormat::RGBA8Unorm);
-        wgpu::Texture destination = Create2DTexture({16, 16, 2}, 5, wgpu::TextureFormat::RGBA8Unorm,
-                                                    wgpu::TextureUsage::CopyDst);
+    void TestWriteTexture(size_t dataSize,
+                          uint32_t dataOffset,
+                          uint32_t dataBytesPerRow,
+                          uint32_t dataRowsPerImage,
+                          wgpu::Texture texture,
+                          uint32_t texLevel,
+                          wgpu::Origin3D texOrigin,
+                          wgpu::Extent3D size,
+                          wgpu::TextureAspect aspect = wgpu::TextureAspect::All) {
+        std::vector<uint8_t> data(dataSize);
 
-        // OOB on the texture because x + width overflows
-        ASSERT_DEVICE_ERROR(
-            TestWriteTexture(dataSize, 0, 256, 4, destination, 0, {13, 12, 0}, {4, 4, 1}));
+        wgpu::TextureDataLayout textureDataLayout;
+        textureDataLayout.offset = dataOffset;
+        textureDataLayout.bytesPerRow = dataBytesPerRow;
+        textureDataLayout.rowsPerImage = dataRowsPerImage;
 
-        // OOB on the texture because y + width overflows
-        ASSERT_DEVICE_ERROR(
-            TestWriteTexture(dataSize, 0, 256, 4, destination, 0, {12, 13, 0}, {4, 4, 1}));
+        wgpu::ImageCopyTexture imageCopyTexture =
+            utils::CreateImageCopyTexture(texture, texLevel, texOrigin, aspect);
 
-        // OOB on the texture because we overflow a non-zero mip
-        ASSERT_DEVICE_ERROR(
-            TestWriteTexture(dataSize, 0, 256, 4, destination, 2, {1, 0, 0}, {4, 4, 1}));
-
-        // OOB on the texture even on an empty copy when we copy to a non-existent mip.
-        ASSERT_DEVICE_ERROR(
-            TestWriteTexture(dataSize, 0, 0, 0, destination, 5, {0, 0, 0}, {0, 0, 1}));
-
-        // OOB on the texture because slice overflows
-        ASSERT_DEVICE_ERROR(
-            TestWriteTexture(dataSize, 0, 0, 0, destination, 0, {0, 0, 2}, {0, 0, 1}));
+        queue.WriteTexture(&imageCopyTexture, data.data(), dataSize, &textureDataLayout, &size);
     }
 
-    // Test that we force Depth=1 on writes to 2D textures
-    TEST_F(QueueWriteTextureValidationTest, DepthConstraintFor2DTextures) {
-        const uint64_t dataSize =
-            utils::RequiredBytesInCopy(0, 0, {0, 0, 2}, wgpu::TextureFormat::RGBA8Unorm);
-        wgpu::Texture destination = Create2DTexture({16, 16, 1}, 5, wgpu::TextureFormat::RGBA8Unorm,
-                                                    wgpu::TextureUsage::CopyDst);
-
-        // Depth > 1 on an empty copy still errors
-        ASSERT_DEVICE_ERROR(
-            TestWriteTexture(dataSize, 0, 0, 0, destination, 0, {0, 0, 0}, {0, 0, 2}));
-    }
-
-    // Test WriteTexture with incorrect texture usage
-    TEST_F(QueueWriteTextureValidationTest, IncorrectUsage) {
-        const uint64_t dataSize =
-            utils::RequiredBytesInCopy(256, 0, {4, 4, 1}, wgpu::TextureFormat::RGBA8Unorm);
-        wgpu::Texture sampled = Create2DTexture({16, 16, 1}, 5, wgpu::TextureFormat::RGBA8Unorm,
-                                                wgpu::TextureUsage::TextureBinding);
-
-        // Incorrect destination usage
-        ASSERT_DEVICE_ERROR(
-            TestWriteTexture(dataSize, 0, 256, 4, sampled, 0, {0, 0, 0}, {4, 4, 1}));
-    }
-
-    // Test incorrect values of bytesPerRow and that values not divisible by 256 are allowed.
-    TEST_F(QueueWriteTextureValidationTest, BytesPerRowConstraints) {
-        wgpu::Texture destination = Create2DTexture({3, 7, 2}, 1, wgpu::TextureFormat::RGBA8Unorm,
-                                                    wgpu::TextureUsage::CopyDst);
-
-        // bytesPerRow = 0 or wgpu::kCopyStrideUndefined
-        {
-            // copyHeight > 1
-            ASSERT_DEVICE_ERROR(
-                TestWriteTexture(128, 0, 0, 7, destination, 0, {0, 0, 0}, {3, 7, 1}));
-            TestWriteTexture(128, 0, 0, 7, destination, 0, {0, 0, 0}, {0, 7, 1});
-            ASSERT_DEVICE_ERROR(TestWriteTexture(128, 0, wgpu::kCopyStrideUndefined, 7, destination,
-                                                 0, {0, 0, 0}, {0, 7, 1}));
-
-            // copyDepth > 1
-            ASSERT_DEVICE_ERROR(
-                TestWriteTexture(128, 0, 0, 1, destination, 0, {0, 0, 0}, {3, 1, 2}));
-            TestWriteTexture(128, 0, 0, 1, destination, 0, {0, 0, 0}, {0, 1, 2});
-            ASSERT_DEVICE_ERROR(TestWriteTexture(128, 0, wgpu::kCopyStrideUndefined, 1, destination,
-                                                 0, {0, 0, 0}, {0, 1, 2}));
-
-            // copyHeight = 1 and copyDepth = 1
-            ASSERT_DEVICE_ERROR(
-                TestWriteTexture(128, 0, 0, 1, destination, 0, {0, 0, 0}, {3, 1, 1}));
-            TestWriteTexture(128, 0, wgpu::kCopyStrideUndefined, 1, destination, 0, {0, 0, 0},
-                             {3, 1, 1});
-        }
-
-        // bytesPerRow = 11 is invalid since a row takes 12 bytes.
-        {
-            // copyHeight > 1
-            ASSERT_DEVICE_ERROR(
-                TestWriteTexture(128, 0, 11, 7, destination, 0, {0, 0, 0}, {3, 7, 1}));
-            // copyHeight == 0
-            ASSERT_DEVICE_ERROR(
-                TestWriteTexture(128, 0, 11, 0, destination, 0, {0, 0, 0}, {3, 0, 1}));
-
-            // copyDepth > 1
-            ASSERT_DEVICE_ERROR(
-                TestWriteTexture(128, 0, 11, 1, destination, 0, {0, 0, 0}, {3, 1, 2}));
-            // copyDepth == 0
-            ASSERT_DEVICE_ERROR(
-                TestWriteTexture(128, 0, 11, 1, destination, 0, {0, 0, 0}, {3, 1, 0}));
-
-            // copyHeight = 1 and copyDepth = 1
-            ASSERT_DEVICE_ERROR(
-                TestWriteTexture(128, 0, 11, 1, destination, 0, {0, 0, 0}, {3, 1, 1}));
-        }
-
-        // bytesPerRow = 12 is valid since a row takes 12 bytes.
-        TestWriteTexture(128, 0, 12, 7, destination, 0, {0, 0, 0}, {3, 7, 1});
-
-        // bytesPerRow = 13 is valid since a row takes 12 bytes.
-        TestWriteTexture(128, 0, 13, 7, destination, 0, {0, 0, 0}, {3, 7, 1});
-    }
-
-    // Test that if rowsPerImage is greater than 0, it must be at least copy height.
-    TEST_F(QueueWriteTextureValidationTest, RowsPerImageConstraints) {
+    void TestWriteTextureExactDataSize(uint32_t bytesPerRow,
+                                       uint32_t rowsPerImage,
+                                       wgpu::Texture texture,
+                                       wgpu::TextureFormat textureFormat,
+                                       wgpu::Origin3D origin,
+                                       wgpu::Extent3D extent3D) {
+        // Check the minimal valid dataSize.
         uint64_t dataSize =
-            utils::RequiredBytesInCopy(256, 5, {4, 4, 2}, wgpu::TextureFormat::RGBA8Unorm);
-        wgpu::Texture destination = Create2DTexture({16, 16, 2}, 1, wgpu::TextureFormat::RGBA8Unorm,
-                                                    wgpu::TextureUsage::CopyDst);
+            utils::RequiredBytesInCopy(bytesPerRow, rowsPerImage, extent3D, textureFormat);
+        TestWriteTexture(dataSize, 0, bytesPerRow, rowsPerImage, texture, 0, origin, extent3D);
 
-        // rowsPerImage is wgpu::kCopyStrideUndefined
-        TestWriteTexture(dataSize, 0, 256, wgpu::kCopyStrideUndefined, destination, 0, {0, 0, 0},
-                         {4, 4, 1});
+        // Check dataSize was indeed minimal.
+        uint64_t invalidSize = dataSize - 1;
+        ASSERT_DEVICE_ERROR(TestWriteTexture(invalidSize, 0, bytesPerRow, rowsPerImage, texture, 0,
+                                             origin, extent3D));
+    }
 
-        // rowsPerImage is equal to copy height (Valid)
+    wgpu::Queue queue;
+};
+
+// Test the success case for WriteTexture
+TEST_F(QueueWriteTextureValidationTest, Success) {
+    const uint64_t dataSize =
+        utils::RequiredBytesInCopy(256, 0, {4, 4, 1}, wgpu::TextureFormat::RGBA8Unorm);
+    wgpu::Texture destination = Create2DTexture({16, 16, 4}, 5, wgpu::TextureFormat::RGBA8Unorm,
+                                                wgpu::TextureUsage::CopyDst);
+
+    // Different copies, including some that touch the OOB condition
+    {
+        // Copy 4x4 block in corner of first mip.
         TestWriteTexture(dataSize, 0, 256, 4, destination, 0, {0, 0, 0}, {4, 4, 1});
-
-        // rowsPerImage is larger than copy height (Valid)
-        TestWriteTexture(dataSize, 0, 256, 5, destination, 0, {0, 0, 0}, {4, 4, 1});
-        TestWriteTexture(dataSize, 0, 256, 5, destination, 0, {0, 0, 0}, {4, 4, 2});
-
-        // rowsPerImage is less than copy height (Invalid)
-        ASSERT_DEVICE_ERROR(
-            TestWriteTexture(dataSize, 0, 256, 3, destination, 0, {0, 0, 0}, {4, 4, 1}));
-        ASSERT_DEVICE_ERROR(
-            TestWriteTexture(dataSize, 0, 256, 0, destination, 0, {0, 0, 0}, {4, 4, 1}));
-    }
-
-    // Test WriteTexture with data offset
-    TEST_F(QueueWriteTextureValidationTest, DataOffset) {
-        uint64_t dataSize =
-            utils::RequiredBytesInCopy(256, 0, {4, 4, 1}, wgpu::TextureFormat::RGBA8Unorm);
-        wgpu::Texture destination = Create2DTexture({16, 16, 1}, 5, wgpu::TextureFormat::RGBA8Unorm,
-                                                    wgpu::TextureUsage::CopyDst);
-
-        // Offset aligned
+        // Copy 4x4 block in opposite corner of first mip.
+        TestWriteTexture(dataSize, 0, 256, 4, destination, 0, {12, 12, 0}, {4, 4, 1});
+        // Copy 4x4 block in the 4x4 mip.
+        TestWriteTexture(dataSize, 0, 256, 4, destination, 2, {0, 0, 0}, {4, 4, 1});
+        // Copy with a data offset
         TestWriteTexture(dataSize, dataSize - 4, 256, 1, destination, 0, {0, 0, 0}, {1, 1, 1});
-        // Offset not aligned
-        TestWriteTexture(dataSize, dataSize - 5, 256, 1, destination, 0, {0, 0, 0}, {1, 1, 1});
-        // Offset+size too large
-        ASSERT_DEVICE_ERROR(
-            TestWriteTexture(dataSize, dataSize - 3, 256, 1, destination, 0, {0, 0, 0}, {1, 1, 1}));
+        TestWriteTexture(dataSize, dataSize - 4, 256, wgpu::kCopyStrideUndefined, destination, 0,
+                         {0, 0, 0}, {1, 1, 1});
     }
 
-    // Test multisampled textures can be used in WriteTexture.
-    TEST_F(QueueWriteTextureValidationTest, WriteToMultisampledTexture) {
-        uint64_t dataSize =
-            utils::RequiredBytesInCopy(256, 0, {2, 2, 1}, wgpu::TextureFormat::RGBA8Unorm);
-        wgpu::Texture destination = Create2DTexture({2, 2, 1}, 1, wgpu::TextureFormat::RGBA8Unorm,
-                                                    wgpu::TextureUsage::CopyDst, 4);
-
-        ASSERT_DEVICE_ERROR(
-            TestWriteTexture(dataSize, 0, 256, 2, destination, 0, {0, 0, 0}, {2, 2, 1}));
+    // Copies with a 256-byte aligned bytes per row but unaligned texture region
+    {
+        // Unaligned region
+        TestWriteTexture(dataSize, 0, 256, 4, destination, 0, {0, 0, 0}, {3, 4, 1});
+        // Unaligned region with texture offset
+        TestWriteTexture(dataSize, 0, 256, 3, destination, 0, {5, 7, 0}, {2, 3, 1});
+        // Unaligned region, with data offset
+        TestWriteTexture(dataSize, 31 * 4, 256, 3, destination, 0, {0, 0, 0}, {3, 3, 1});
     }
 
-    // Test that WriteTexture cannot be run with a destroyed texture.
-    TEST_F(QueueWriteTextureValidationTest, DestroyedTexture) {
-        const uint64_t dataSize =
-            utils::RequiredBytesInCopy(256, 4, {4, 4, 1}, wgpu::TextureFormat::RGBA8Unorm);
-        wgpu::Texture destination = Create2DTexture({16, 16, 4}, 5, wgpu::TextureFormat::RGBA8Unorm,
-                                                    wgpu::TextureUsage::CopyDst);
-        destination.Destroy();
+    // Empty copies are valid
+    {
+        // An empty copy
+        TestWriteTexture(dataSize, 0, 0, 0, destination, 0, {0, 0, 0}, {0, 0, 1});
+        TestWriteTexture(dataSize, 0, 0, wgpu::kCopyStrideUndefined, destination, 0, {0, 0, 0},
+                         {0, 0, 1});
+        // An empty copy with depth = 0
+        TestWriteTexture(dataSize, 0, 0, 0, destination, 0, {0, 0, 0}, {0, 0, 0});
+        TestWriteTexture(dataSize, 0, 0, wgpu::kCopyStrideUndefined, destination, 0, {0, 0, 0},
+                         {0, 0, 0});
+        // An empty copy touching the end of the data
+        TestWriteTexture(dataSize, dataSize, 0, 0, destination, 0, {0, 0, 0}, {0, 0, 1});
+        TestWriteTexture(dataSize, dataSize, 0, wgpu::kCopyStrideUndefined, destination, 0,
+                         {0, 0, 0}, {0, 0, 1});
+        // An empty copy touching the side of the texture
+        TestWriteTexture(dataSize, 0, 0, 0, destination, 0, {16, 16, 0}, {0, 0, 1});
+        TestWriteTexture(dataSize, 0, 0, wgpu::kCopyStrideUndefined, destination, 0, {16, 16, 0},
+                         {0, 0, 1});
+        // An empty copy with depth = 1 and bytesPerRow > 0
+        TestWriteTexture(dataSize, 0, 256, 0, destination, 0, {0, 0, 0}, {0, 0, 1});
+        TestWriteTexture(dataSize, 0, 256, wgpu::kCopyStrideUndefined, destination, 0, {0, 0, 0},
+                         {0, 0, 1});
+        // An empty copy with height > 0, depth = 0, bytesPerRow > 0 and rowsPerImage > 0
+        TestWriteTexture(dataSize, 0, 256, wgpu::kCopyStrideUndefined, destination, 0, {0, 0, 0},
+                         {0, 1, 0});
+        TestWriteTexture(dataSize, 0, 256, 1, destination, 0, {0, 0, 0}, {0, 1, 0});
+        TestWriteTexture(dataSize, 0, 256, 16, destination, 0, {0, 0, 0}, {0, 1, 0});
+    }
+}
 
-        ASSERT_DEVICE_ERROR(
-            TestWriteTexture(dataSize, 0, 256, 4, destination, 0, {0, 0, 0}, {4, 4, 1}));
+// Test OOB conditions on the data
+TEST_F(QueueWriteTextureValidationTest, OutOfBoundsOnData) {
+    const uint64_t dataSize =
+        utils::RequiredBytesInCopy(256, 0, {4, 4, 1}, wgpu::TextureFormat::RGBA8Unorm);
+    wgpu::Texture destination = Create2DTexture({16, 16, 1}, 5, wgpu::TextureFormat::RGBA8Unorm,
+                                                wgpu::TextureUsage::CopyDst);
+
+    // OOB on the data because we copy too many pixels
+    ASSERT_DEVICE_ERROR(
+        TestWriteTexture(dataSize, 0, 256, 5, destination, 0, {0, 0, 0}, {4, 5, 1}));
+
+    // OOB on the data because of the offset
+    ASSERT_DEVICE_ERROR(
+        TestWriteTexture(dataSize, 4, 256, 4, destination, 0, {0, 0, 0}, {4, 4, 1}));
+
+    // OOB on the data because utils::RequiredBytesInCopy overflows
+    ASSERT_DEVICE_ERROR(
+        TestWriteTexture(dataSize, 0, 512, 3, destination, 0, {0, 0, 0}, {4, 3, 1}));
+
+    // Not OOB on the data although bytes per row * height overflows
+    // but utils::RequiredBytesInCopy * depth does not overflow
+    {
+        uint32_t sourceDataSize =
+            utils::RequiredBytesInCopy(256, 0, {7, 3, 1}, wgpu::TextureFormat::RGBA8Unorm);
+        ASSERT_TRUE(256 * 3 > sourceDataSize) << "bytes per row * height should overflow data";
+
+        TestWriteTexture(sourceDataSize, 0, 256, 3, destination, 0, {0, 0, 0}, {7, 3, 1});
+    }
+}
+
+// Test OOB conditions on the texture
+TEST_F(QueueWriteTextureValidationTest, OutOfBoundsOnTexture) {
+    const uint64_t dataSize =
+        utils::RequiredBytesInCopy(256, 0, {4, 4, 1}, wgpu::TextureFormat::RGBA8Unorm);
+    wgpu::Texture destination = Create2DTexture({16, 16, 2}, 5, wgpu::TextureFormat::RGBA8Unorm,
+                                                wgpu::TextureUsage::CopyDst);
+
+    // OOB on the texture because x + width overflows
+    ASSERT_DEVICE_ERROR(
+        TestWriteTexture(dataSize, 0, 256, 4, destination, 0, {13, 12, 0}, {4, 4, 1}));
+
+    // OOB on the texture because y + width overflows
+    ASSERT_DEVICE_ERROR(
+        TestWriteTexture(dataSize, 0, 256, 4, destination, 0, {12, 13, 0}, {4, 4, 1}));
+
+    // OOB on the texture because we overflow a non-zero mip
+    ASSERT_DEVICE_ERROR(
+        TestWriteTexture(dataSize, 0, 256, 4, destination, 2, {1, 0, 0}, {4, 4, 1}));
+
+    // OOB on the texture even on an empty copy when we copy to a non-existent mip.
+    ASSERT_DEVICE_ERROR(TestWriteTexture(dataSize, 0, 0, 0, destination, 5, {0, 0, 0}, {0, 0, 1}));
+
+    // OOB on the texture because slice overflows
+    ASSERT_DEVICE_ERROR(TestWriteTexture(dataSize, 0, 0, 0, destination, 0, {0, 0, 2}, {0, 0, 1}));
+}
+
+// Test that we force Depth=1 on writes to 2D textures
+TEST_F(QueueWriteTextureValidationTest, DepthConstraintFor2DTextures) {
+    const uint64_t dataSize =
+        utils::RequiredBytesInCopy(0, 0, {0, 0, 2}, wgpu::TextureFormat::RGBA8Unorm);
+    wgpu::Texture destination = Create2DTexture({16, 16, 1}, 5, wgpu::TextureFormat::RGBA8Unorm,
+                                                wgpu::TextureUsage::CopyDst);
+
+    // Depth > 1 on an empty copy still errors
+    ASSERT_DEVICE_ERROR(TestWriteTexture(dataSize, 0, 0, 0, destination, 0, {0, 0, 0}, {0, 0, 2}));
+}
+
+// Test WriteTexture with incorrect texture usage
+TEST_F(QueueWriteTextureValidationTest, IncorrectUsage) {
+    const uint64_t dataSize =
+        utils::RequiredBytesInCopy(256, 0, {4, 4, 1}, wgpu::TextureFormat::RGBA8Unorm);
+    wgpu::Texture sampled = Create2DTexture({16, 16, 1}, 5, wgpu::TextureFormat::RGBA8Unorm,
+                                            wgpu::TextureUsage::TextureBinding);
+
+    // Incorrect destination usage
+    ASSERT_DEVICE_ERROR(TestWriteTexture(dataSize, 0, 256, 4, sampled, 0, {0, 0, 0}, {4, 4, 1}));
+}
+
+// Test incorrect values of bytesPerRow and that values not divisible by 256 are allowed.
+TEST_F(QueueWriteTextureValidationTest, BytesPerRowConstraints) {
+    wgpu::Texture destination =
+        Create2DTexture({3, 7, 2}, 1, wgpu::TextureFormat::RGBA8Unorm, wgpu::TextureUsage::CopyDst);
+
+    // bytesPerRow = 0 or wgpu::kCopyStrideUndefined
+    {
+        // copyHeight > 1
+        ASSERT_DEVICE_ERROR(TestWriteTexture(128, 0, 0, 7, destination, 0, {0, 0, 0}, {3, 7, 1}));
+        TestWriteTexture(128, 0, 0, 7, destination, 0, {0, 0, 0}, {0, 7, 1});
+        ASSERT_DEVICE_ERROR(TestWriteTexture(128, 0, wgpu::kCopyStrideUndefined, 7, destination, 0,
+                                             {0, 0, 0}, {0, 7, 1}));
+
+        // copyDepth > 1
+        ASSERT_DEVICE_ERROR(TestWriteTexture(128, 0, 0, 1, destination, 0, {0, 0, 0}, {3, 1, 2}));
+        TestWriteTexture(128, 0, 0, 1, destination, 0, {0, 0, 0}, {0, 1, 2});
+        ASSERT_DEVICE_ERROR(TestWriteTexture(128, 0, wgpu::kCopyStrideUndefined, 1, destination, 0,
+                                             {0, 0, 0}, {0, 1, 2}));
+
+        // copyHeight = 1 and copyDepth = 1
+        ASSERT_DEVICE_ERROR(TestWriteTexture(128, 0, 0, 1, destination, 0, {0, 0, 0}, {3, 1, 1}));
+        TestWriteTexture(128, 0, wgpu::kCopyStrideUndefined, 1, destination, 0, {0, 0, 0},
+                         {3, 1, 1});
     }
 
-    // Test WriteTexture with texture in error state causes errors.
-    TEST_F(QueueWriteTextureValidationTest, TextureInErrorState) {
-        wgpu::TextureDescriptor errorTextureDescriptor;
-        errorTextureDescriptor.size.depthOrArrayLayers = 0;
-        ASSERT_DEVICE_ERROR(wgpu::Texture errorTexture =
-                                device.CreateTexture(&errorTextureDescriptor));
-        wgpu::ImageCopyTexture errorImageCopyTexture =
-            utils::CreateImageCopyTexture(errorTexture, 0, {0, 0, 0});
+    // bytesPerRow = 11 is invalid since a row takes 12 bytes.
+    {
+        // copyHeight > 1
+        ASSERT_DEVICE_ERROR(TestWriteTexture(128, 0, 11, 7, destination, 0, {0, 0, 0}, {3, 7, 1}));
+        // copyHeight == 0
+        ASSERT_DEVICE_ERROR(TestWriteTexture(128, 0, 11, 0, destination, 0, {0, 0, 0}, {3, 0, 1}));
 
-        wgpu::Extent3D extent3D = {0, 0, 0};
+        // copyDepth > 1
+        ASSERT_DEVICE_ERROR(TestWriteTexture(128, 0, 11, 1, destination, 0, {0, 0, 0}, {3, 1, 2}));
+        // copyDepth == 0
+        ASSERT_DEVICE_ERROR(TestWriteTexture(128, 0, 11, 1, destination, 0, {0, 0, 0}, {3, 1, 0}));
 
-        {
-            std::vector<uint8_t> data(4);
-            wgpu::TextureDataLayout textureDataLayout = utils::CreateTextureDataLayout(0, 0, 0);
+        // copyHeight = 1 and copyDepth = 1
+        ASSERT_DEVICE_ERROR(TestWriteTexture(128, 0, 11, 1, destination, 0, {0, 0, 0}, {3, 1, 1}));
+    }
 
-            ASSERT_DEVICE_ERROR(queue.WriteTexture(&errorImageCopyTexture, data.data(), 4,
-                                                   &textureDataLayout, &extent3D));
+    // bytesPerRow = 12 is valid since a row takes 12 bytes.
+    TestWriteTexture(128, 0, 12, 7, destination, 0, {0, 0, 0}, {3, 7, 1});
+
+    // bytesPerRow = 13 is valid since a row takes 12 bytes.
+    TestWriteTexture(128, 0, 13, 7, destination, 0, {0, 0, 0}, {3, 7, 1});
+}
+
+// Test that if rowsPerImage is greater than 0, it must be at least copy height.
+TEST_F(QueueWriteTextureValidationTest, RowsPerImageConstraints) {
+    uint64_t dataSize =
+        utils::RequiredBytesInCopy(256, 5, {4, 4, 2}, wgpu::TextureFormat::RGBA8Unorm);
+    wgpu::Texture destination = Create2DTexture({16, 16, 2}, 1, wgpu::TextureFormat::RGBA8Unorm,
+                                                wgpu::TextureUsage::CopyDst);
+
+    // rowsPerImage is wgpu::kCopyStrideUndefined
+    TestWriteTexture(dataSize, 0, 256, wgpu::kCopyStrideUndefined, destination, 0, {0, 0, 0},
+                     {4, 4, 1});
+
+    // rowsPerImage is equal to copy height (Valid)
+    TestWriteTexture(dataSize, 0, 256, 4, destination, 0, {0, 0, 0}, {4, 4, 1});
+
+    // rowsPerImage is larger than copy height (Valid)
+    TestWriteTexture(dataSize, 0, 256, 5, destination, 0, {0, 0, 0}, {4, 4, 1});
+    TestWriteTexture(dataSize, 0, 256, 5, destination, 0, {0, 0, 0}, {4, 4, 2});
+
+    // rowsPerImage is less than copy height (Invalid)
+    ASSERT_DEVICE_ERROR(
+        TestWriteTexture(dataSize, 0, 256, 3, destination, 0, {0, 0, 0}, {4, 4, 1}));
+    ASSERT_DEVICE_ERROR(
+        TestWriteTexture(dataSize, 0, 256, 0, destination, 0, {0, 0, 0}, {4, 4, 1}));
+}
+
+// Test WriteTexture with data offset
+TEST_F(QueueWriteTextureValidationTest, DataOffset) {
+    uint64_t dataSize =
+        utils::RequiredBytesInCopy(256, 0, {4, 4, 1}, wgpu::TextureFormat::RGBA8Unorm);
+    wgpu::Texture destination = Create2DTexture({16, 16, 1}, 5, wgpu::TextureFormat::RGBA8Unorm,
+                                                wgpu::TextureUsage::CopyDst);
+
+    // Offset aligned
+    TestWriteTexture(dataSize, dataSize - 4, 256, 1, destination, 0, {0, 0, 0}, {1, 1, 1});
+    // Offset not aligned
+    TestWriteTexture(dataSize, dataSize - 5, 256, 1, destination, 0, {0, 0, 0}, {1, 1, 1});
+    // Offset+size too large
+    ASSERT_DEVICE_ERROR(
+        TestWriteTexture(dataSize, dataSize - 3, 256, 1, destination, 0, {0, 0, 0}, {1, 1, 1}));
+}
+
+// Test multisampled textures can be used in WriteTexture.
+TEST_F(QueueWriteTextureValidationTest, WriteToMultisampledTexture) {
+    uint64_t dataSize =
+        utils::RequiredBytesInCopy(256, 0, {2, 2, 1}, wgpu::TextureFormat::RGBA8Unorm);
+    wgpu::Texture destination = Create2DTexture({2, 2, 1}, 1, wgpu::TextureFormat::RGBA8Unorm,
+                                                wgpu::TextureUsage::CopyDst, 4);
+
+    ASSERT_DEVICE_ERROR(
+        TestWriteTexture(dataSize, 0, 256, 2, destination, 0, {0, 0, 0}, {2, 2, 1}));
+}
+
+// Test that WriteTexture cannot be run with a destroyed texture.
+TEST_F(QueueWriteTextureValidationTest, DestroyedTexture) {
+    const uint64_t dataSize =
+        utils::RequiredBytesInCopy(256, 4, {4, 4, 1}, wgpu::TextureFormat::RGBA8Unorm);
+    wgpu::Texture destination = Create2DTexture({16, 16, 4}, 5, wgpu::TextureFormat::RGBA8Unorm,
+                                                wgpu::TextureUsage::CopyDst);
+    destination.Destroy();
+
+    ASSERT_DEVICE_ERROR(
+        TestWriteTexture(dataSize, 0, 256, 4, destination, 0, {0, 0, 0}, {4, 4, 1}));
+}
+
+// Test WriteTexture with texture in error state causes errors.
+TEST_F(QueueWriteTextureValidationTest, TextureInErrorState) {
+    wgpu::TextureDescriptor errorTextureDescriptor;
+    errorTextureDescriptor.size.depthOrArrayLayers = 0;
+    ASSERT_DEVICE_ERROR(wgpu::Texture errorTexture = device.CreateTexture(&errorTextureDescriptor));
+    wgpu::ImageCopyTexture errorImageCopyTexture =
+        utils::CreateImageCopyTexture(errorTexture, 0, {0, 0, 0});
+
+    wgpu::Extent3D extent3D = {0, 0, 0};
+
+    {
+        std::vector<uint8_t> data(4);
+        wgpu::TextureDataLayout textureDataLayout = utils::CreateTextureDataLayout(0, 0, 0);
+
+        ASSERT_DEVICE_ERROR(queue.WriteTexture(&errorImageCopyTexture, data.data(), 4,
+                                               &textureDataLayout, &extent3D));
+    }
+}
+
+// Test that WriteTexture throws an error when requiredBytesInCopy overflows uint64_t
+TEST_F(QueueWriteTextureValidationTest, RequiredBytesInCopyOverflow) {
+    wgpu::Texture destination = Create2DTexture({1, 1, 16}, 1, wgpu::TextureFormat::RGBA8Unorm,
+                                                wgpu::TextureUsage::CopyDst);
+
+    // success because depth = 1.
+    TestWriteTexture(10000, 0, (1 << 31), (1 << 31), destination, 0, {0, 0, 0}, {1, 1, 1});
+    // failure because bytesPerImage * (depth - 1) overflows.
+    ASSERT_DEVICE_ERROR(
+        TestWriteTexture(10000, 0, (1 << 31), (1 << 31), destination, 0, {0, 0, 0}, {1, 1, 16}));
+}
+
+// Regression tests for a bug in the computation of texture data size in Dawn.
+TEST_F(QueueWriteTextureValidationTest, TextureWriteDataSizeLastRowComputation) {
+    constexpr uint32_t kBytesPerRow = 256;
+    constexpr uint32_t kWidth = 4;
+    constexpr uint32_t kHeight = 4;
+
+    constexpr std::array<wgpu::TextureFormat, 2> kFormats = {wgpu::TextureFormat::RGBA8Unorm,
+                                                             wgpu::TextureFormat::RG8Unorm};
+
+    {
+        // kBytesPerRow * (kHeight - 1) + kWidth is not large enough to be the valid data size
+        // in this test because the data sizes in WriteTexture are not in texels but in bytes.
+        constexpr uint32_t kInvalidDataSize = kBytesPerRow * (kHeight - 1) + kWidth;
+
+        for (wgpu::TextureFormat format : kFormats) {
+            wgpu::Texture destination =
+                Create2DTexture({kWidth, kHeight, 1}, 1, format, wgpu::TextureUsage::CopyDst);
+            ASSERT_DEVICE_ERROR(TestWriteTexture(kInvalidDataSize, 0, kBytesPerRow, kHeight,
+                                                 destination, 0, {0, 0, 0}, {kWidth, kHeight, 1}));
         }
     }
 
-    // Test that WriteTexture throws an error when requiredBytesInCopy overflows uint64_t
-    TEST_F(QueueWriteTextureValidationTest, RequiredBytesInCopyOverflow) {
-        wgpu::Texture destination = Create2DTexture({1, 1, 16}, 1, wgpu::TextureFormat::RGBA8Unorm,
-                                                    wgpu::TextureUsage::CopyDst);
+    {
+        for (wgpu::TextureFormat format : kFormats) {
+            uint32_t validDataSize =
+                utils::RequiredBytesInCopy(kBytesPerRow, 0, {kWidth, kHeight, 1}, format);
+            wgpu::Texture destination =
+                Create2DTexture({kWidth, kHeight, 1}, 1, format, wgpu::TextureUsage::CopyDst);
 
-        // success because depth = 1.
-        TestWriteTexture(10000, 0, (1 << 31), (1 << 31), destination, 0, {0, 0, 0}, {1, 1, 1});
-        // failure because bytesPerImage * (depth - 1) overflows.
-        ASSERT_DEVICE_ERROR(TestWriteTexture(10000, 0, (1 << 31), (1 << 31), destination, 0,
-                                             {0, 0, 0}, {1, 1, 16}));
-    }
-
-    // Regression tests for a bug in the computation of texture data size in Dawn.
-    TEST_F(QueueWriteTextureValidationTest, TextureWriteDataSizeLastRowComputation) {
-        constexpr uint32_t kBytesPerRow = 256;
-        constexpr uint32_t kWidth = 4;
-        constexpr uint32_t kHeight = 4;
-
-        constexpr std::array<wgpu::TextureFormat, 2> kFormats = {wgpu::TextureFormat::RGBA8Unorm,
-                                                                 wgpu::TextureFormat::RG8Unorm};
-
-        {
-            // kBytesPerRow * (kHeight - 1) + kWidth is not large enough to be the valid data size
-            // in this test because the data sizes in WriteTexture are not in texels but in bytes.
-            constexpr uint32_t kInvalidDataSize = kBytesPerRow * (kHeight - 1) + kWidth;
-
-            for (wgpu::TextureFormat format : kFormats) {
-                wgpu::Texture destination =
-                    Create2DTexture({kWidth, kHeight, 1}, 1, format, wgpu::TextureUsage::CopyDst);
-                ASSERT_DEVICE_ERROR(TestWriteTexture(kInvalidDataSize, 0, kBytesPerRow, kHeight,
+            // Verify the return value of RequiredBytesInCopy() is exactly the minimum valid
+            // data size in this test.
+            {
+                uint32_t invalidDataSize = validDataSize - 1;
+                ASSERT_DEVICE_ERROR(TestWriteTexture(invalidDataSize, 0, kBytesPerRow, kHeight,
                                                      destination, 0, {0, 0, 0},
                                                      {kWidth, kHeight, 1}));
             }
-        }
 
-        {
-            for (wgpu::TextureFormat format : kFormats) {
-                uint32_t validDataSize =
-                    utils::RequiredBytesInCopy(kBytesPerRow, 0, {kWidth, kHeight, 1}, format);
-                wgpu::Texture destination =
-                    Create2DTexture({kWidth, kHeight, 1}, 1, format, wgpu::TextureUsage::CopyDst);
-
-                // Verify the return value of RequiredBytesInCopy() is exactly the minimum valid
-                // data size in this test.
-                {
-                    uint32_t invalidDataSize = validDataSize - 1;
-                    ASSERT_DEVICE_ERROR(TestWriteTexture(invalidDataSize, 0, kBytesPerRow, kHeight,
-                                                         destination, 0, {0, 0, 0},
-                                                         {kWidth, kHeight, 1}));
-                }
-
-                {
-                    TestWriteTexture(validDataSize, 0, kBytesPerRow, kHeight, destination, 0,
-                                     {0, 0, 0}, {kWidth, kHeight, 1});
-                }
+            {
+                TestWriteTexture(validDataSize, 0, kBytesPerRow, kHeight, destination, 0, {0, 0, 0},
+                                 {kWidth, kHeight, 1});
             }
         }
     }
+}
 
-    // Test write from data to mip map of non square texture
-    TEST_F(QueueWriteTextureValidationTest, WriteToMipmapOfNonSquareTexture) {
-        uint64_t dataSize =
-            utils::RequiredBytesInCopy(256, 0, {4, 2, 1}, wgpu::TextureFormat::RGBA8Unorm);
-        uint32_t maxMipmapLevel = 3;
-        wgpu::Texture destination =
-            Create2DTexture({4, 2, 1}, maxMipmapLevel, wgpu::TextureFormat::RGBA8Unorm,
-                            wgpu::TextureUsage::CopyDst);
+// Test write from data to mip map of non square texture
+TEST_F(QueueWriteTextureValidationTest, WriteToMipmapOfNonSquareTexture) {
+    uint64_t dataSize =
+        utils::RequiredBytesInCopy(256, 0, {4, 2, 1}, wgpu::TextureFormat::RGBA8Unorm);
+    uint32_t maxMipmapLevel = 3;
+    wgpu::Texture destination = Create2DTexture(
+        {4, 2, 1}, maxMipmapLevel, wgpu::TextureFormat::RGBA8Unorm, wgpu::TextureUsage::CopyDst);
 
-        // Copy to top level mip map
-        TestWriteTexture(dataSize, 0, 256, 1, destination, maxMipmapLevel - 1, {0, 0, 0},
-                         {1, 1, 1});
-        // Copy to high level mip map
-        TestWriteTexture(dataSize, 0, 256, 1, destination, maxMipmapLevel - 2, {0, 0, 0},
-                         {2, 1, 1});
-        // Mip level out of range
-        ASSERT_DEVICE_ERROR(TestWriteTexture(dataSize, 0, 256, 1, destination, maxMipmapLevel,
-                                             {0, 0, 0}, {1, 1, 1}));
-        // Copy origin out of range
-        ASSERT_DEVICE_ERROR(TestWriteTexture(dataSize, 0, 256, 1, destination, maxMipmapLevel - 2,
-                                             {1, 0, 0}, {2, 1, 1}));
-        // Copy size out of range
-        ASSERT_DEVICE_ERROR(TestWriteTexture(dataSize, 0, 256, 2, destination, maxMipmapLevel - 2,
-                                             {0, 0, 0}, {2, 2, 1}));
+    // Copy to top level mip map
+    TestWriteTexture(dataSize, 0, 256, 1, destination, maxMipmapLevel - 1, {0, 0, 0}, {1, 1, 1});
+    // Copy to high level mip map
+    TestWriteTexture(dataSize, 0, 256, 1, destination, maxMipmapLevel - 2, {0, 0, 0}, {2, 1, 1});
+    // Mip level out of range
+    ASSERT_DEVICE_ERROR(
+        TestWriteTexture(dataSize, 0, 256, 1, destination, maxMipmapLevel, {0, 0, 0}, {1, 1, 1}));
+    // Copy origin out of range
+    ASSERT_DEVICE_ERROR(TestWriteTexture(dataSize, 0, 256, 1, destination, maxMipmapLevel - 2,
+                                         {1, 0, 0}, {2, 1, 1}));
+    // Copy size out of range
+    ASSERT_DEVICE_ERROR(TestWriteTexture(dataSize, 0, 256, 2, destination, maxMipmapLevel - 2,
+                                         {0, 0, 0}, {2, 2, 1}));
+}
+
+// Test writes to multiple array layers of an uncompressed texture
+TEST_F(QueueWriteTextureValidationTest, WriteToMultipleArrayLayers) {
+    wgpu::Texture destination = QueueWriteTextureValidationTest::Create2DTexture(
+        {4, 2, 5}, 1, wgpu::TextureFormat::RGBA8Unorm,
+        wgpu::TextureUsage::CopyDst | wgpu::TextureUsage::CopySrc);
+
+    // Write to all array layers
+    TestWriteTextureExactDataSize(256, 2, destination, wgpu::TextureFormat::RGBA8Unorm, {0, 0, 0},
+                                  {4, 2, 5});
+
+    // Write to the highest array layer
+    TestWriteTextureExactDataSize(256, 2, destination, wgpu::TextureFormat::RGBA8Unorm, {0, 0, 4},
+                                  {4, 2, 1});
+
+    // Write to array layers in the middle
+    TestWriteTextureExactDataSize(256, 2, destination, wgpu::TextureFormat::RGBA8Unorm, {0, 0, 1},
+                                  {4, 2, 3});
+
+    // Copy with a non-packed rowsPerImage
+    TestWriteTextureExactDataSize(256, 3, destination, wgpu::TextureFormat::RGBA8Unorm, {0, 0, 0},
+                                  {4, 2, 5});
+
+    // Copy with bytesPerRow = 500
+    TestWriteTextureExactDataSize(500, 2, destination, wgpu::TextureFormat::RGBA8Unorm, {0, 0, 1},
+                                  {4, 2, 3});
+}
+
+// Test it is invalid to write into a depth texture.
+TEST_F(QueueWriteTextureValidationTest, WriteToDepthAspect) {
+    uint32_t bytesPerRow = sizeof(float) * 4;
+    const uint64_t dataSize =
+        utils::RequiredBytesInCopy(bytesPerRow, 0, {4, 4, 1}, wgpu::TextureFormat::Depth32Float);
+
+    // Invalid to write into depth32float
+    {
+        wgpu::Texture destination = QueueWriteTextureValidationTest::Create2DTexture(
+            {4, 4, 1}, 1, wgpu::TextureFormat::Depth32Float, wgpu::TextureUsage::CopyDst);
+
+        ASSERT_DEVICE_ERROR(TestWriteTexture(dataSize, 0, bytesPerRow, 4, destination, 0, {0, 0, 0},
+                                             {4, 4, 1}, wgpu::TextureAspect::All));
+
+        ASSERT_DEVICE_ERROR(TestWriteTexture(dataSize, 0, bytesPerRow, 4, destination, 0, {0, 0, 0},
+                                             {4, 4, 1}, wgpu::TextureAspect::DepthOnly));
     }
 
-    // Test writes to multiple array layers of an uncompressed texture
-    TEST_F(QueueWriteTextureValidationTest, WriteToMultipleArrayLayers) {
+    // Invalid to write into depth24plus
+    {
         wgpu::Texture destination = QueueWriteTextureValidationTest::Create2DTexture(
-            {4, 2, 5}, 1, wgpu::TextureFormat::RGBA8Unorm,
+            {4, 4, 1}, 1, wgpu::TextureFormat::Depth24Plus, wgpu::TextureUsage::CopyDst);
+
+        ASSERT_DEVICE_ERROR(TestWriteTexture(dataSize, 0, bytesPerRow, 4, destination, 0, {0, 0, 0},
+                                             {4, 4, 1}, wgpu::TextureAspect::All));
+
+        ASSERT_DEVICE_ERROR(TestWriteTexture(dataSize, 0, bytesPerRow, 4, destination, 0, {0, 0, 0},
+                                             {4, 4, 1}, wgpu::TextureAspect::DepthOnly));
+    }
+}
+
+// Test write texture to the stencil aspect
+TEST_F(QueueWriteTextureValidationTest, WriteToStencilAspect) {
+    uint32_t bytesPerRow = 4;
+    const uint64_t dataSize =
+        utils::RequiredBytesInCopy(bytesPerRow, 0, {4, 4, 1}, wgpu::TextureFormat::R8Uint);
+
+    // It is valid to write into the stencil aspect of depth24plus-stencil8
+    {
+        wgpu::Texture destination = QueueWriteTextureValidationTest::Create2DTexture(
+            {4, 4, 1}, 1, wgpu::TextureFormat::Depth24PlusStencil8, wgpu::TextureUsage::CopyDst);
+
+        TestWriteTexture(dataSize, 0, bytesPerRow, wgpu::kCopyStrideUndefined, destination, 0,
+                         {0, 0, 0}, {4, 4, 1}, wgpu::TextureAspect::StencilOnly);
+
+        // And that it fails if the buffer is one byte too small
+        ASSERT_DEVICE_ERROR(TestWriteTexture(dataSize - 1, 0, bytesPerRow, 4, destination, 0,
+                                             {0, 0, 0}, {4, 4, 1},
+                                             wgpu::TextureAspect::StencilOnly));
+
+        // It is invalid to write just part of the subresource size
+        ASSERT_DEVICE_ERROR(TestWriteTexture(dataSize, 0, bytesPerRow, 3, destination, 0, {0, 0, 0},
+                                             {3, 3, 1}, wgpu::TextureAspect::StencilOnly));
+    }
+
+    // It is invalid to write into the stencil aspect of depth24plus (no stencil)
+    {
+        wgpu::Texture destination = QueueWriteTextureValidationTest::Create2DTexture(
+            {4, 4, 1}, 1, wgpu::TextureFormat::Depth24Plus, wgpu::TextureUsage::CopyDst);
+
+        ASSERT_DEVICE_ERROR(TestWriteTexture(dataSize, 0, bytesPerRow, 4, destination, 0, {0, 0, 0},
+                                             {4, 4, 1}, wgpu::TextureAspect::StencilOnly));
+    }
+}
+
+class WriteTextureTest_CompressedTextureFormats : public QueueWriteTextureValidationTest {
+  protected:
+    WGPUDevice CreateTestDevice() override {
+        wgpu::DeviceDescriptor descriptor;
+        wgpu::FeatureName requiredFeatures[3] = {wgpu::FeatureName::TextureCompressionBC,
+                                                 wgpu::FeatureName::TextureCompressionETC2,
+                                                 wgpu::FeatureName::TextureCompressionASTC};
+        descriptor.requiredFeatures = requiredFeatures;
+        descriptor.requiredFeaturesCount = 3;
+        return adapter.CreateDevice(&descriptor);
+    }
+
+    wgpu::Texture Create2DTexture(wgpu::TextureFormat format,
+                                  uint32_t mipmapLevels = 1,
+                                  uint32_t width = kWidth,
+                                  uint32_t height = kHeight) {
+        constexpr wgpu::TextureUsage kUsage = wgpu::TextureUsage::CopyDst;
+        constexpr uint32_t kArrayLayers = 1;
+        return QueueWriteTextureValidationTest::Create2DTexture({width, height, kArrayLayers},
+                                                                mipmapLevels, format, kUsage, 1);
+    }
+
+    void TestWriteTexture(size_t dataSize,
+                          uint32_t dataOffset,
+                          uint32_t dataBytesPerRow,
+                          uint32_t dataRowsPerImage,
+                          wgpu::Texture texture,
+                          uint32_t textLevel,
+                          wgpu::Origin3D textOrigin,
+                          wgpu::Extent3D size) {
+        QueueWriteTextureValidationTest::TestWriteTexture(dataSize, dataOffset, dataBytesPerRow,
+                                                          dataRowsPerImage, texture, textLevel,
+                                                          textOrigin, size);
+    }
+
+    static constexpr uint32_t kWidth = 120;
+    static constexpr uint32_t kHeight = 120;
+};
+
+// Tests to verify that data offset may not be a multiple of the compressed texture block size
+TEST_F(WriteTextureTest_CompressedTextureFormats, DataOffset) {
+    for (wgpu::TextureFormat format : utils::kCompressedFormats) {
+        wgpu::Texture texture = Create2DTexture(format);
+        uint32_t blockWidth = utils::GetTextureFormatBlockWidth(format);
+        uint32_t blockHeight = utils::GetTextureFormatBlockHeight(format);
+
+        // Valid if aligned.
+        {
+            uint32_t kAlignedOffset = utils::GetTexelBlockSizeInBytes(format);
+            TestWriteTexture(1024, kAlignedOffset, 256, 4, texture, 0, {0, 0, 0},
+                             {blockWidth, blockHeight, 1});
+        }
+
+        // Still valid if not aligned.
+        {
+            uint32_t kUnalignedOffset = utils::GetTexelBlockSizeInBytes(format) - 1;
+            TestWriteTexture(1024, kUnalignedOffset, 256, 4, texture, 0, {0, 0, 0},
+                             {blockWidth, blockHeight, 1});
+        }
+    }
+}
+
+// Tests to verify that bytesPerRow must not be less than (width / blockWidth) *
+// blockSizeInBytes and that it doesn't have to be a multiple of the compressed
+// texture block width.
+TEST_F(WriteTextureTest_CompressedTextureFormats, BytesPerRow) {
+    // Used to compute test width and height.
+    constexpr uint32_t kTestBytesPerRow = 320;
+
+    for (wgpu::TextureFormat format : utils::kCompressedFormats) {
+        uint32_t blockWidth = utils::GetTextureFormatBlockWidth(format);
+        uint32_t blockHeight = utils::GetTextureFormatBlockHeight(format);
+        uint32_t blockByteSize = utils::GetTexelBlockSizeInBytes(format);
+        uint32_t testWidth = kTestBytesPerRow * blockWidth / blockByteSize;
+        uint32_t testHeight = kTestBytesPerRow * blockHeight / blockByteSize;
+        wgpu::Texture texture = Create2DTexture(format, 1, testWidth, testHeight);
+
+        // Failures on the BytesPerRow that is not large enough.
+        {
+            uint32_t kSmallBytesPerRow = kTestBytesPerRow - blockByteSize;
+            ASSERT_DEVICE_ERROR(TestWriteTexture(1024, 0, kSmallBytesPerRow, 4, texture, 0,
+                                                 {0, 0, 0}, {testWidth, blockHeight, 1}));
+        }
+
+        // Test it is valid to use a BytesPerRow that is not a multiple of 256.
+        {
+            TestWriteTexture(1024, 0, kTestBytesPerRow, 4, texture, 0, {0, 0, 0},
+                             {testWidth, blockHeight, 1});
+        }
+
+        // Valid usage of bytesPerRow in WriteTexture with compressed texture formats.
+        {
+            TestWriteTexture(512, 0, blockByteSize, 4, texture, 0, {0, 0, 0},
+                             {blockWidth, blockHeight, 1});
+        }
+
+        // Valid usage of bytesPerRow in WriteTexture with compressed texture formats. Note that
+        // BytesPerRow is not a multiple of the blockByteSize (but is greater than it).
+        {
+            TestWriteTexture(512, 0, blockByteSize + 1, 4, texture, 0, {0, 0, 0},
+                             {blockWidth, blockHeight, 1});
+        }
+    }
+}
+
+// rowsPerImage must be >= heightInBlocks.
+TEST_F(WriteTextureTest_CompressedTextureFormats, RowsPerImage) {
+    for (wgpu::TextureFormat format : utils::kCompressedFormats) {
+        wgpu::Texture texture = Create2DTexture(format);
+        uint32_t blockWidth = utils::GetTextureFormatBlockWidth(format);
+        uint32_t blockHeight = utils::GetTextureFormatBlockHeight(format);
+
+        // Valid usages of rowsPerImage in WriteTexture with compressed texture formats.
+        {
+            constexpr uint32_t kValidRowsPerImage = 5;
+            TestWriteTexture(1024, 0, 256, kValidRowsPerImage, texture, 0, {0, 0, 0},
+                             {blockWidth, blockHeight * 4, 1});
+        }
+        {
+            constexpr uint32_t kValidRowsPerImage = 4;
+            TestWriteTexture(1024, 0, 256, kValidRowsPerImage, texture, 0, {0, 0, 0},
+                             {blockWidth, blockHeight * 4, 1});
+        }
+
+        // rowsPerImage is smaller than height.
+        {
+            constexpr uint32_t kInvalidRowsPerImage = 3;
+            ASSERT_DEVICE_ERROR(TestWriteTexture(1024, 0, 256, kInvalidRowsPerImage, texture, 0,
+                                                 {0, 0, 0}, {blockWidth, blockWidth * 4, 1}));
+        }
+    }
+}
+
+// Tests to verify that ImageOffset.x must be a multiple of the compressed texture block width
+// and ImageOffset.y must be a multiple of the compressed texture block height
+TEST_F(WriteTextureTest_CompressedTextureFormats, ImageOffset) {
+    for (wgpu::TextureFormat format : utils::kCompressedFormats) {
+        wgpu::Texture texture = Create2DTexture(format);
+        wgpu::Texture texture2 = Create2DTexture(format);
+        uint32_t blockWidth = utils::GetTextureFormatBlockWidth(format);
+        uint32_t blockHeight = utils::GetTextureFormatBlockHeight(format);
+
+        wgpu::Origin3D smallestValidOrigin3D = {blockWidth, blockHeight, 0};
+
+        // Valid usages of ImageOffset in WriteTexture with compressed texture formats.
+        {
+            TestWriteTexture(512, 0, 256, 4, texture, 0, smallestValidOrigin3D,
+                             {blockWidth, blockHeight, 1});
+        }
+
+        // Failures on invalid ImageOffset.x.
+        {
+            wgpu::Origin3D invalidOrigin3D = {smallestValidOrigin3D.x - 1, smallestValidOrigin3D.y,
+                                              0};
+            ASSERT_DEVICE_ERROR(TestWriteTexture(512, 0, 256, 4, texture, 0, invalidOrigin3D,
+                                                 {blockWidth, blockHeight, 1}));
+        }
+
+        // Failures on invalid ImageOffset.y.
+        {
+            wgpu::Origin3D invalidOrigin3D = {smallestValidOrigin3D.x, smallestValidOrigin3D.y - 1,
+                                              0};
+            ASSERT_DEVICE_ERROR(TestWriteTexture(512, 0, 256, 4, texture, 0, invalidOrigin3D,
+                                                 {blockWidth, blockHeight, 1}));
+        }
+    }
+}
+
+// Tests to verify that ImageExtent.x must be a multiple of the compressed texture block width
+// and ImageExtent.y must be a multiple of the compressed texture block height
+TEST_F(WriteTextureTest_CompressedTextureFormats, ImageExtent) {
+    constexpr uint32_t kMipmapLevels = 3;
+    // We choose a prime that is greater than the current max texel dimension size as a
+    // multiplier to compute the test texture size so that we can be certain that its level 2
+    // mipmap (x4) cannot be a multiple of the dimension. This is useful for testing padding at
+    // the edges of the mipmaps.
+    constexpr uint32_t kBlockPerDim = 13;
+
+    for (wgpu::TextureFormat format : utils::kCompressedFormats) {
+        uint32_t blockWidth = utils::GetTextureFormatBlockWidth(format);
+        uint32_t blockHeight = utils::GetTextureFormatBlockHeight(format);
+        uint32_t testWidth = blockWidth * kBlockPerDim;
+        uint32_t testHeight = blockHeight * kBlockPerDim;
+        wgpu::Texture texture = Create2DTexture(format, kMipmapLevels, testWidth, testHeight);
+        wgpu::Texture texture2 = Create2DTexture(format, kMipmapLevels, testWidth, testHeight);
+
+        wgpu::Extent3D smallestValidExtent3D = {blockWidth, blockHeight, 1};
+
+        // Valid usages of ImageExtent in WriteTexture with compressed texture formats.
+        { TestWriteTexture(512, 0, 256, 4, texture, 0, {0, 0, 0}, smallestValidExtent3D); }
+
+        // Valid usages of ImageExtent in WriteTexture with compressed texture formats
+        // and non-zero mipmap levels.
+        {
+            constexpr uint32_t kTestMipmapLevel = 2;
+            wgpu::Origin3D testOrigin = {
+                ((testWidth >> kTestMipmapLevel) / blockWidth) * blockWidth,
+                ((testHeight >> kTestMipmapLevel) / blockHeight) * blockHeight, 0};
+
+            TestWriteTexture(512, 0, 256, 4, texture, kTestMipmapLevel, testOrigin,
+                             smallestValidExtent3D);
+        }
+
+        // Failures on invalid ImageExtent.x.
+        {
+            wgpu::Extent3D inValidExtent3D = {smallestValidExtent3D.width - 1,
+                                              smallestValidExtent3D.height, 1};
+            ASSERT_DEVICE_ERROR(
+                TestWriteTexture(512, 0, 256, 4, texture, 0, {0, 0, 0}, inValidExtent3D));
+        }
+
+        // Failures on invalid ImageExtent.y.
+        {
+            wgpu::Extent3D inValidExtent3D = {smallestValidExtent3D.width,
+                                              smallestValidExtent3D.height - 1, 1};
+            ASSERT_DEVICE_ERROR(
+                TestWriteTexture(512, 0, 256, 4, texture, 0, {0, 0, 0}, inValidExtent3D));
+        }
+    }
+}
+
+// Test writes to multiple array layers of a compressed texture
+TEST_F(WriteTextureTest_CompressedTextureFormats, WriteToMultipleArrayLayers) {
+    constexpr uint32_t kWidthMultiplier = 3;
+    constexpr uint32_t kHeightMultiplier = 4;
+    for (wgpu::TextureFormat format : utils::kCompressedFormats) {
+        uint32_t blockWidth = utils::GetTextureFormatBlockWidth(format);
+        uint32_t blockHeight = utils::GetTextureFormatBlockHeight(format);
+        uint32_t testWidth = kWidthMultiplier * blockWidth;
+        uint32_t testHeight = kHeightMultiplier * blockHeight;
+        wgpu::Texture texture = QueueWriteTextureValidationTest::Create2DTexture(
+            {testWidth, testHeight, 20}, 1, format,
             wgpu::TextureUsage::CopyDst | wgpu::TextureUsage::CopySrc);
 
         // Write to all array layers
-        TestWriteTextureExactDataSize(256, 2, destination, wgpu::TextureFormat::RGBA8Unorm,
-                                      {0, 0, 0}, {4, 2, 5});
+        TestWriteTextureExactDataSize(256, 4, texture, format, {0, 0, 0},
+                                      {testWidth, testHeight, 20});
 
         // Write to the highest array layer
-        TestWriteTextureExactDataSize(256, 2, destination, wgpu::TextureFormat::RGBA8Unorm,
-                                      {0, 0, 4}, {4, 2, 1});
+        TestWriteTextureExactDataSize(256, 4, texture, format, {0, 0, 19},
+                                      {testWidth, testHeight, 1});
 
         // Write to array layers in the middle
-        TestWriteTextureExactDataSize(256, 2, destination, wgpu::TextureFormat::RGBA8Unorm,
-                                      {0, 0, 1}, {4, 2, 3});
+        TestWriteTextureExactDataSize(256, 4, texture, format, {0, 0, 1},
+                                      {testWidth, testHeight, 18});
 
-        // Copy with a non-packed rowsPerImage
-        TestWriteTextureExactDataSize(256, 3, destination, wgpu::TextureFormat::RGBA8Unorm,
-                                      {0, 0, 0}, {4, 2, 5});
-
-        // Copy with bytesPerRow = 500
-        TestWriteTextureExactDataSize(500, 2, destination, wgpu::TextureFormat::RGBA8Unorm,
-                                      {0, 0, 1}, {4, 2, 3});
+        // Write touching the texture corners with a non-packed rowsPerImage
+        TestWriteTextureExactDataSize(256, 6, texture, format, {blockWidth, blockHeight, 4},
+                                      {testWidth - blockWidth, testHeight - blockHeight, 16});
     }
-
-    // Test it is invalid to write into a depth texture.
-    TEST_F(QueueWriteTextureValidationTest, WriteToDepthAspect) {
-        uint32_t bytesPerRow = sizeof(float) * 4;
-        const uint64_t dataSize = utils::RequiredBytesInCopy(bytesPerRow, 0, {4, 4, 1},
-                                                             wgpu::TextureFormat::Depth32Float);
-
-        // Invalid to write into depth32float
-        {
-            wgpu::Texture destination = QueueWriteTextureValidationTest::Create2DTexture(
-                {4, 4, 1}, 1, wgpu::TextureFormat::Depth32Float, wgpu::TextureUsage::CopyDst);
-
-            ASSERT_DEVICE_ERROR(TestWriteTexture(dataSize, 0, bytesPerRow, 4, destination, 0,
-                                                 {0, 0, 0}, {4, 4, 1}, wgpu::TextureAspect::All));
-
-            ASSERT_DEVICE_ERROR(TestWriteTexture(dataSize, 0, bytesPerRow, 4, destination, 0,
-                                                 {0, 0, 0}, {4, 4, 1},
-                                                 wgpu::TextureAspect::DepthOnly));
-        }
-
-        // Invalid to write into depth24plus
-        {
-            wgpu::Texture destination = QueueWriteTextureValidationTest::Create2DTexture(
-                {4, 4, 1}, 1, wgpu::TextureFormat::Depth24Plus, wgpu::TextureUsage::CopyDst);
-
-            ASSERT_DEVICE_ERROR(TestWriteTexture(dataSize, 0, bytesPerRow, 4, destination, 0,
-                                                 {0, 0, 0}, {4, 4, 1}, wgpu::TextureAspect::All));
-
-            ASSERT_DEVICE_ERROR(TestWriteTexture(dataSize, 0, bytesPerRow, 4, destination, 0,
-                                                 {0, 0, 0}, {4, 4, 1},
-                                                 wgpu::TextureAspect::DepthOnly));
-        }
-    }
-
-    // Test write texture to the stencil aspect
-    TEST_F(QueueWriteTextureValidationTest, WriteToStencilAspect) {
-        uint32_t bytesPerRow = 4;
-        const uint64_t dataSize =
-            utils::RequiredBytesInCopy(bytesPerRow, 0, {4, 4, 1}, wgpu::TextureFormat::R8Uint);
-
-        // It is valid to write into the stencil aspect of depth24plus-stencil8
-        {
-            wgpu::Texture destination = QueueWriteTextureValidationTest::Create2DTexture(
-                {4, 4, 1}, 1, wgpu::TextureFormat::Depth24PlusStencil8,
-                wgpu::TextureUsage::CopyDst);
-
-            TestWriteTexture(dataSize, 0, bytesPerRow, wgpu::kCopyStrideUndefined, destination, 0,
-                             {0, 0, 0}, {4, 4, 1}, wgpu::TextureAspect::StencilOnly);
-
-            // And that it fails if the buffer is one byte too small
-            ASSERT_DEVICE_ERROR(TestWriteTexture(dataSize - 1, 0, bytesPerRow, 4, destination, 0,
-                                                 {0, 0, 0}, {4, 4, 1},
-                                                 wgpu::TextureAspect::StencilOnly));
-
-            // It is invalid to write just part of the subresource size
-            ASSERT_DEVICE_ERROR(TestWriteTexture(dataSize, 0, bytesPerRow, 3, destination, 0,
-                                                 {0, 0, 0}, {3, 3, 1},
-                                                 wgpu::TextureAspect::StencilOnly));
-        }
-
-        // It is invalid to write into the stencil aspect of depth24plus (no stencil)
-        {
-            wgpu::Texture destination = QueueWriteTextureValidationTest::Create2DTexture(
-                {4, 4, 1}, 1, wgpu::TextureFormat::Depth24Plus, wgpu::TextureUsage::CopyDst);
-
-            ASSERT_DEVICE_ERROR(TestWriteTexture(dataSize, 0, bytesPerRow, 4, destination, 0,
-                                                 {0, 0, 0}, {4, 4, 1},
-                                                 wgpu::TextureAspect::StencilOnly));
-        }
-    }
-
-    class WriteTextureTest_CompressedTextureFormats : public QueueWriteTextureValidationTest {
-      protected:
-        WGPUDevice CreateTestDevice() override {
-            wgpu::DeviceDescriptor descriptor;
-            wgpu::FeatureName requiredFeatures[3] = {wgpu::FeatureName::TextureCompressionBC,
-                                                     wgpu::FeatureName::TextureCompressionETC2,
-                                                     wgpu::FeatureName::TextureCompressionASTC};
-            descriptor.requiredFeatures = requiredFeatures;
-            descriptor.requiredFeaturesCount = 3;
-            return adapter.CreateDevice(&descriptor);
-        }
-
-        wgpu::Texture Create2DTexture(wgpu::TextureFormat format,
-                                      uint32_t mipmapLevels = 1,
-                                      uint32_t width = kWidth,
-                                      uint32_t height = kHeight) {
-            constexpr wgpu::TextureUsage kUsage = wgpu::TextureUsage::CopyDst;
-            constexpr uint32_t kArrayLayers = 1;
-            return QueueWriteTextureValidationTest::Create2DTexture(
-                {width, height, kArrayLayers}, mipmapLevels, format, kUsage, 1);
-        }
-
-        void TestWriteTexture(size_t dataSize,
-                              uint32_t dataOffset,
-                              uint32_t dataBytesPerRow,
-                              uint32_t dataRowsPerImage,
-                              wgpu::Texture texture,
-                              uint32_t textLevel,
-                              wgpu::Origin3D textOrigin,
-                              wgpu::Extent3D size) {
-            QueueWriteTextureValidationTest::TestWriteTexture(dataSize, dataOffset, dataBytesPerRow,
-                                                              dataRowsPerImage, texture, textLevel,
-                                                              textOrigin, size);
-        }
-
-        static constexpr uint32_t kWidth = 120;
-        static constexpr uint32_t kHeight = 120;
-    };
-
-    // Tests to verify that data offset may not be a multiple of the compressed texture block size
-    TEST_F(WriteTextureTest_CompressedTextureFormats, DataOffset) {
-        for (wgpu::TextureFormat format : utils::kCompressedFormats) {
-            wgpu::Texture texture = Create2DTexture(format);
-            uint32_t blockWidth = utils::GetTextureFormatBlockWidth(format);
-            uint32_t blockHeight = utils::GetTextureFormatBlockHeight(format);
-
-            // Valid if aligned.
-            {
-                uint32_t kAlignedOffset = utils::GetTexelBlockSizeInBytes(format);
-                TestWriteTexture(1024, kAlignedOffset, 256, 4, texture, 0, {0, 0, 0},
-                                 {blockWidth, blockHeight, 1});
-            }
-
-            // Still valid if not aligned.
-            {
-                uint32_t kUnalignedOffset = utils::GetTexelBlockSizeInBytes(format) - 1;
-                TestWriteTexture(1024, kUnalignedOffset, 256, 4, texture, 0, {0, 0, 0},
-                                 {blockWidth, blockHeight, 1});
-            }
-        }
-    }
-
-    // Tests to verify that bytesPerRow must not be less than (width / blockWidth) *
-    // blockSizeInBytes and that it doesn't have to be a multiple of the compressed
-    // texture block width.
-    TEST_F(WriteTextureTest_CompressedTextureFormats, BytesPerRow) {
-        // Used to compute test width and height.
-        constexpr uint32_t kTestBytesPerRow = 320;
-
-        for (wgpu::TextureFormat format : utils::kCompressedFormats) {
-            uint32_t blockWidth = utils::GetTextureFormatBlockWidth(format);
-            uint32_t blockHeight = utils::GetTextureFormatBlockHeight(format);
-            uint32_t blockByteSize = utils::GetTexelBlockSizeInBytes(format);
-            uint32_t testWidth = kTestBytesPerRow * blockWidth / blockByteSize;
-            uint32_t testHeight = kTestBytesPerRow * blockHeight / blockByteSize;
-            wgpu::Texture texture = Create2DTexture(format, 1, testWidth, testHeight);
-
-            // Failures on the BytesPerRow that is not large enough.
-            {
-                uint32_t kSmallBytesPerRow = kTestBytesPerRow - blockByteSize;
-                ASSERT_DEVICE_ERROR(TestWriteTexture(1024, 0, kSmallBytesPerRow, 4, texture, 0,
-                                                     {0, 0, 0}, {testWidth, blockHeight, 1}));
-            }
-
-            // Test it is valid to use a BytesPerRow that is not a multiple of 256.
-            {
-                TestWriteTexture(1024, 0, kTestBytesPerRow, 4, texture, 0, {0, 0, 0},
-                                 {testWidth, blockHeight, 1});
-            }
-
-            // Valid usage of bytesPerRow in WriteTexture with compressed texture formats.
-            {
-                TestWriteTexture(512, 0, blockByteSize, 4, texture, 0, {0, 0, 0},
-                                 {blockWidth, blockHeight, 1});
-            }
-
-            // Valid usage of bytesPerRow in WriteTexture with compressed texture formats. Note that
-            // BytesPerRow is not a multiple of the blockByteSize (but is greater than it).
-            {
-                TestWriteTexture(512, 0, blockByteSize + 1, 4, texture, 0, {0, 0, 0},
-                                 {blockWidth, blockHeight, 1});
-            }
-        }
-    }
-
-    // rowsPerImage must be >= heightInBlocks.
-    TEST_F(WriteTextureTest_CompressedTextureFormats, RowsPerImage) {
-        for (wgpu::TextureFormat format : utils::kCompressedFormats) {
-            wgpu::Texture texture = Create2DTexture(format);
-            uint32_t blockWidth = utils::GetTextureFormatBlockWidth(format);
-            uint32_t blockHeight = utils::GetTextureFormatBlockHeight(format);
-
-            // Valid usages of rowsPerImage in WriteTexture with compressed texture formats.
-            {
-                constexpr uint32_t kValidRowsPerImage = 5;
-                TestWriteTexture(1024, 0, 256, kValidRowsPerImage, texture, 0, {0, 0, 0},
-                                 {blockWidth, blockHeight * 4, 1});
-            }
-            {
-                constexpr uint32_t kValidRowsPerImage = 4;
-                TestWriteTexture(1024, 0, 256, kValidRowsPerImage, texture, 0, {0, 0, 0},
-                                 {blockWidth, blockHeight * 4, 1});
-            }
-
-            // rowsPerImage is smaller than height.
-            {
-                constexpr uint32_t kInvalidRowsPerImage = 3;
-                ASSERT_DEVICE_ERROR(TestWriteTexture(1024, 0, 256, kInvalidRowsPerImage, texture, 0,
-                                                     {0, 0, 0}, {blockWidth, blockWidth * 4, 1}));
-            }
-        }
-    }
-
-    // Tests to verify that ImageOffset.x must be a multiple of the compressed texture block width
-    // and ImageOffset.y must be a multiple of the compressed texture block height
-    TEST_F(WriteTextureTest_CompressedTextureFormats, ImageOffset) {
-        for (wgpu::TextureFormat format : utils::kCompressedFormats) {
-            wgpu::Texture texture = Create2DTexture(format);
-            wgpu::Texture texture2 = Create2DTexture(format);
-            uint32_t blockWidth = utils::GetTextureFormatBlockWidth(format);
-            uint32_t blockHeight = utils::GetTextureFormatBlockHeight(format);
-
-            wgpu::Origin3D smallestValidOrigin3D = {blockWidth, blockHeight, 0};
-
-            // Valid usages of ImageOffset in WriteTexture with compressed texture formats.
-            {
-                TestWriteTexture(512, 0, 256, 4, texture, 0, smallestValidOrigin3D,
-                                 {blockWidth, blockHeight, 1});
-            }
-
-            // Failures on invalid ImageOffset.x.
-            {
-                wgpu::Origin3D invalidOrigin3D = {smallestValidOrigin3D.x - 1,
-                                                  smallestValidOrigin3D.y, 0};
-                ASSERT_DEVICE_ERROR(TestWriteTexture(512, 0, 256, 4, texture, 0, invalidOrigin3D,
-                                                     {blockWidth, blockHeight, 1}));
-            }
-
-            // Failures on invalid ImageOffset.y.
-            {
-                wgpu::Origin3D invalidOrigin3D = {smallestValidOrigin3D.x,
-                                                  smallestValidOrigin3D.y - 1, 0};
-                ASSERT_DEVICE_ERROR(TestWriteTexture(512, 0, 256, 4, texture, 0, invalidOrigin3D,
-                                                     {blockWidth, blockHeight, 1}));
-            }
-        }
-    }
-
-    // Tests to verify that ImageExtent.x must be a multiple of the compressed texture block width
-    // and ImageExtent.y must be a multiple of the compressed texture block height
-    TEST_F(WriteTextureTest_CompressedTextureFormats, ImageExtent) {
-        constexpr uint32_t kMipmapLevels = 3;
-        // We choose a prime that is greater than the current max texel dimension size as a
-        // multiplier to compute the test texture size so that we can be certain that its level 2
-        // mipmap (x4) cannot be a multiple of the dimension. This is useful for testing padding at
-        // the edges of the mipmaps.
-        constexpr uint32_t kBlockPerDim = 13;
-
-        for (wgpu::TextureFormat format : utils::kCompressedFormats) {
-            uint32_t blockWidth = utils::GetTextureFormatBlockWidth(format);
-            uint32_t blockHeight = utils::GetTextureFormatBlockHeight(format);
-            uint32_t testWidth = blockWidth * kBlockPerDim;
-            uint32_t testHeight = blockHeight * kBlockPerDim;
-            wgpu::Texture texture = Create2DTexture(format, kMipmapLevels, testWidth, testHeight);
-            wgpu::Texture texture2 = Create2DTexture(format, kMipmapLevels, testWidth, testHeight);
-
-            wgpu::Extent3D smallestValidExtent3D = {blockWidth, blockHeight, 1};
-
-            // Valid usages of ImageExtent in WriteTexture with compressed texture formats.
-            { TestWriteTexture(512, 0, 256, 4, texture, 0, {0, 0, 0}, smallestValidExtent3D); }
-
-            // Valid usages of ImageExtent in WriteTexture with compressed texture formats
-            // and non-zero mipmap levels.
-            {
-                constexpr uint32_t kTestMipmapLevel = 2;
-                wgpu::Origin3D testOrigin = {
-                    ((testWidth >> kTestMipmapLevel) / blockWidth) * blockWidth,
-                    ((testHeight >> kTestMipmapLevel) / blockHeight) * blockHeight, 0};
-
-                TestWriteTexture(512, 0, 256, 4, texture, kTestMipmapLevel, testOrigin,
-                                 smallestValidExtent3D);
-            }
-
-            // Failures on invalid ImageExtent.x.
-            {
-                wgpu::Extent3D inValidExtent3D = {smallestValidExtent3D.width - 1,
-                                                  smallestValidExtent3D.height, 1};
-                ASSERT_DEVICE_ERROR(
-                    TestWriteTexture(512, 0, 256, 4, texture, 0, {0, 0, 0}, inValidExtent3D));
-            }
-
-            // Failures on invalid ImageExtent.y.
-            {
-                wgpu::Extent3D inValidExtent3D = {smallestValidExtent3D.width,
-                                                  smallestValidExtent3D.height - 1, 1};
-                ASSERT_DEVICE_ERROR(
-                    TestWriteTexture(512, 0, 256, 4, texture, 0, {0, 0, 0}, inValidExtent3D));
-            }
-        }
-    }
-
-    // Test writes to multiple array layers of a compressed texture
-    TEST_F(WriteTextureTest_CompressedTextureFormats, WriteToMultipleArrayLayers) {
-        constexpr uint32_t kWidthMultiplier = 3;
-        constexpr uint32_t kHeightMultiplier = 4;
-        for (wgpu::TextureFormat format : utils::kCompressedFormats) {
-            uint32_t blockWidth = utils::GetTextureFormatBlockWidth(format);
-            uint32_t blockHeight = utils::GetTextureFormatBlockHeight(format);
-            uint32_t testWidth = kWidthMultiplier * blockWidth;
-            uint32_t testHeight = kHeightMultiplier * blockHeight;
-            wgpu::Texture texture = QueueWriteTextureValidationTest::Create2DTexture(
-                {testWidth, testHeight, 20}, 1, format,
-                wgpu::TextureUsage::CopyDst | wgpu::TextureUsage::CopySrc);
-
-            // Write to all array layers
-            TestWriteTextureExactDataSize(256, 4, texture, format, {0, 0, 0},
-                                          {testWidth, testHeight, 20});
-
-            // Write to the highest array layer
-            TestWriteTextureExactDataSize(256, 4, texture, format, {0, 0, 19},
-                                          {testWidth, testHeight, 1});
-
-            // Write to array layers in the middle
-            TestWriteTextureExactDataSize(256, 4, texture, format, {0, 0, 1},
-                                          {testWidth, testHeight, 18});
-
-            // Write touching the texture corners with a non-packed rowsPerImage
-            TestWriteTextureExactDataSize(256, 6, texture, format, {blockWidth, blockHeight, 4},
-                                          {testWidth - blockWidth, testHeight - blockHeight, 16});
-        }
-    }
+}
 
 }  // anonymous namespace
diff --git a/src/dawn/tests/unittests/validation/RenderBundleValidationTests.cpp b/src/dawn/tests/unittests/validation/RenderBundleValidationTests.cpp
index db4d28f..817f302 100644
--- a/src/dawn/tests/unittests/validation/RenderBundleValidationTests.cpp
+++ b/src/dawn/tests/unittests/validation/RenderBundleValidationTests.cpp
@@ -22,12 +22,12 @@
 
 namespace {
 
-    class RenderBundleValidationTest : public ValidationTest {
-      protected:
-        void SetUp() override {
-            ValidationTest::SetUp();
+class RenderBundleValidationTest : public ValidationTest {
+  protected:
+    void SetUp() override {
+        ValidationTest::SetUp();
 
-            vsModule = utils::CreateShaderModule(device, R"(
+        vsModule = utils::CreateShaderModule(device, R"(
                 struct S {
                     transform : mat2x2<f32>
                 }
@@ -37,7 +37,7 @@
                     return vec4<f32>();
                 })");
 
-            fsModule = utils::CreateShaderModule(device, R"(
+        fsModule = utils::CreateShaderModule(device, R"(
                 struct Uniforms {
                     color : vec4<f32>
                 }
@@ -51,75 +51,75 @@
                 @stage(fragment) fn main() {
                 })");
 
-            wgpu::BindGroupLayout bgls[] = {
-                utils::MakeBindGroupLayout(
-                    device, {{0, wgpu::ShaderStage::Vertex, wgpu::BufferBindingType::Uniform}}),
-                utils::MakeBindGroupLayout(
-                    device, {
-                                {0, wgpu::ShaderStage::Fragment, wgpu::BufferBindingType::Uniform},
-                                {1, wgpu::ShaderStage::Fragment, wgpu::BufferBindingType::Storage},
-                            })};
+        wgpu::BindGroupLayout bgls[] = {
+            utils::MakeBindGroupLayout(
+                device, {{0, wgpu::ShaderStage::Vertex, wgpu::BufferBindingType::Uniform}}),
+            utils::MakeBindGroupLayout(
+                device, {
+                            {0, wgpu::ShaderStage::Fragment, wgpu::BufferBindingType::Uniform},
+                            {1, wgpu::ShaderStage::Fragment, wgpu::BufferBindingType::Storage},
+                        })};
 
-            wgpu::PipelineLayoutDescriptor pipelineLayoutDesc = {};
-            pipelineLayoutDesc.bindGroupLayoutCount = 2;
-            pipelineLayoutDesc.bindGroupLayouts = bgls;
+        wgpu::PipelineLayoutDescriptor pipelineLayoutDesc = {};
+        pipelineLayoutDesc.bindGroupLayoutCount = 2;
+        pipelineLayoutDesc.bindGroupLayouts = bgls;
 
-            pipelineLayout = device.CreatePipelineLayout(&pipelineLayoutDesc);
+        pipelineLayout = device.CreatePipelineLayout(&pipelineLayoutDesc);
 
-            utils::ComboRenderPipelineDescriptor descriptor;
-            InitializeRenderPipelineDescriptor(&descriptor);
-            pipeline = device.CreateRenderPipeline(&descriptor);
+        utils::ComboRenderPipelineDescriptor descriptor;
+        InitializeRenderPipelineDescriptor(&descriptor);
+        pipeline = device.CreateRenderPipeline(&descriptor);
 
-            float data[8];
-            wgpu::Buffer buffer = utils::CreateBufferFromData(device, data, 8 * sizeof(float),
-                                                              wgpu::BufferUsage::Uniform);
+        float data[8];
+        wgpu::Buffer buffer = utils::CreateBufferFromData(device, data, 8 * sizeof(float),
+                                                          wgpu::BufferUsage::Uniform);
 
-            constexpr static float kVertices[] = {-1.f, 1.f, 1.f, -1.f, -1.f, 1.f};
+        constexpr static float kVertices[] = {-1.f, 1.f, 1.f, -1.f, -1.f, 1.f};
 
-            vertexBuffer = utils::CreateBufferFromData(device, kVertices, sizeof(kVertices),
-                                                       wgpu::BufferUsage::Vertex);
+        vertexBuffer = utils::CreateBufferFromData(device, kVertices, sizeof(kVertices),
+                                                   wgpu::BufferUsage::Vertex);
 
-            // Placeholder storage buffer.
-            wgpu::Buffer storageBuffer = utils::CreateBufferFromData(
-                device, kVertices, sizeof(kVertices), wgpu::BufferUsage::Storage);
+        // Placeholder storage buffer.
+        wgpu::Buffer storageBuffer = utils::CreateBufferFromData(
+            device, kVertices, sizeof(kVertices), wgpu::BufferUsage::Storage);
 
-            // Vertex buffer with storage usage for testing read+write error usage.
-            vertexStorageBuffer =
-                utils::CreateBufferFromData(device, kVertices, sizeof(kVertices),
-                                            wgpu::BufferUsage::Vertex | wgpu::BufferUsage::Storage);
+        // Vertex buffer with storage usage for testing read+write error usage.
+        vertexStorageBuffer =
+            utils::CreateBufferFromData(device, kVertices, sizeof(kVertices),
+                                        wgpu::BufferUsage::Vertex | wgpu::BufferUsage::Storage);
 
-            bg0 = utils::MakeBindGroup(device, bgls[0], {{0, buffer, 0, 8 * sizeof(float)}});
-            bg1 = utils::MakeBindGroup(
-                device, bgls[1],
-                {{0, buffer, 0, 4 * sizeof(float)}, {1, storageBuffer, 0, sizeof(kVertices)}});
+        bg0 = utils::MakeBindGroup(device, bgls[0], {{0, buffer, 0, 8 * sizeof(float)}});
+        bg1 = utils::MakeBindGroup(
+            device, bgls[1],
+            {{0, buffer, 0, 4 * sizeof(float)}, {1, storageBuffer, 0, sizeof(kVertices)}});
 
-            bg1Vertex = utils::MakeBindGroup(device, bgls[1],
-                                             {{0, buffer, 0, 8 * sizeof(float)},
-                                              {1, vertexStorageBuffer, 0, sizeof(kVertices)}});
-        }
+        bg1Vertex = utils::MakeBindGroup(
+            device, bgls[1],
+            {{0, buffer, 0, 8 * sizeof(float)}, {1, vertexStorageBuffer, 0, sizeof(kVertices)}});
+    }
 
-        void InitializeRenderPipelineDescriptor(utils::ComboRenderPipelineDescriptor* descriptor) {
-            descriptor->layout = pipelineLayout;
-            descriptor->vertex.module = vsModule;
-            descriptor->cFragment.module = fsModule;
-            descriptor->cTargets[0].writeMask = wgpu::ColorWriteMask::None;
-            descriptor->vertex.bufferCount = 1;
-            descriptor->cBuffers[0].arrayStride = 2 * sizeof(float);
-            descriptor->cBuffers[0].attributeCount = 1;
-            descriptor->cAttributes[0].format = wgpu::VertexFormat::Float32x2;
-            descriptor->cAttributes[0].shaderLocation = 0;
-        }
+    void InitializeRenderPipelineDescriptor(utils::ComboRenderPipelineDescriptor* descriptor) {
+        descriptor->layout = pipelineLayout;
+        descriptor->vertex.module = vsModule;
+        descriptor->cFragment.module = fsModule;
+        descriptor->cTargets[0].writeMask = wgpu::ColorWriteMask::None;
+        descriptor->vertex.bufferCount = 1;
+        descriptor->cBuffers[0].arrayStride = 2 * sizeof(float);
+        descriptor->cBuffers[0].attributeCount = 1;
+        descriptor->cAttributes[0].format = wgpu::VertexFormat::Float32x2;
+        descriptor->cAttributes[0].shaderLocation = 0;
+    }
 
-        wgpu::ShaderModule vsModule;
-        wgpu::ShaderModule fsModule;
-        wgpu::PipelineLayout pipelineLayout;
-        wgpu::RenderPipeline pipeline;
-        wgpu::Buffer vertexBuffer;
-        wgpu::Buffer vertexStorageBuffer;
-        wgpu::BindGroup bg0;
-        wgpu::BindGroup bg1;
-        wgpu::BindGroup bg1Vertex;
-    };
+    wgpu::ShaderModule vsModule;
+    wgpu::ShaderModule fsModule;
+    wgpu::PipelineLayout pipelineLayout;
+    wgpu::RenderPipeline pipeline;
+    wgpu::Buffer vertexBuffer;
+    wgpu::Buffer vertexStorageBuffer;
+    wgpu::BindGroup bg0;
+    wgpu::BindGroup bg1;
+    wgpu::BindGroup bg1Vertex;
+};
 
 }  // anonymous namespace
 
diff --git a/src/dawn/tests/unittests/validation/RenderPassDescriptorValidationTests.cpp b/src/dawn/tests/unittests/validation/RenderPassDescriptorValidationTests.cpp
index 4ef9884..50f7c51 100644
--- a/src/dawn/tests/unittests/validation/RenderPassDescriptorValidationTests.cpp
+++ b/src/dawn/tests/unittests/validation/RenderPassDescriptorValidationTests.cpp
@@ -22,1103 +22,1079 @@
 
 namespace {
 
-    class RenderPassDescriptorValidationTest : public ValidationTest {
-      public:
-        void AssertBeginRenderPassSuccess(const wgpu::RenderPassDescriptor* descriptor) {
-            wgpu::CommandEncoder commandEncoder = TestBeginRenderPass(descriptor);
-            commandEncoder.Finish();
-        }
-        void AssertBeginRenderPassError(const wgpu::RenderPassDescriptor* descriptor) {
-            wgpu::CommandEncoder commandEncoder = TestBeginRenderPass(descriptor);
-            ASSERT_DEVICE_ERROR(commandEncoder.Finish());
-        }
-
-      private:
-        wgpu::CommandEncoder TestBeginRenderPass(const wgpu::RenderPassDescriptor* descriptor) {
-            wgpu::CommandEncoder commandEncoder = device.CreateCommandEncoder();
-            wgpu::RenderPassEncoder renderPassEncoder = commandEncoder.BeginRenderPass(descriptor);
-            renderPassEncoder.End();
-            return commandEncoder;
-        }
-    };
-
-    wgpu::Texture CreateTexture(wgpu::Device& device,
-                                wgpu::TextureDimension dimension,
-                                wgpu::TextureFormat format,
-                                uint32_t width,
-                                uint32_t height,
-                                uint32_t arrayLayerCount,
-                                uint32_t mipLevelCount,
-                                uint32_t sampleCount = 1,
-                                wgpu::TextureUsage usage = wgpu::TextureUsage::RenderAttachment) {
-        wgpu::TextureDescriptor descriptor;
-        descriptor.dimension = dimension;
-        descriptor.size.width = width;
-        descriptor.size.height = height;
-        descriptor.size.depthOrArrayLayers = arrayLayerCount;
-        descriptor.sampleCount = sampleCount;
-        descriptor.format = format;
-        descriptor.mipLevelCount = mipLevelCount;
-        descriptor.usage = usage;
-
-        return device.CreateTexture(&descriptor);
+class RenderPassDescriptorValidationTest : public ValidationTest {
+  public:
+    void AssertBeginRenderPassSuccess(const wgpu::RenderPassDescriptor* descriptor) {
+        wgpu::CommandEncoder commandEncoder = TestBeginRenderPass(descriptor);
+        commandEncoder.Finish();
+    }
+    void AssertBeginRenderPassError(const wgpu::RenderPassDescriptor* descriptor) {
+        wgpu::CommandEncoder commandEncoder = TestBeginRenderPass(descriptor);
+        ASSERT_DEVICE_ERROR(commandEncoder.Finish());
     }
 
-    wgpu::TextureView Create2DAttachment(wgpu::Device& device,
-                                         uint32_t width,
-                                         uint32_t height,
-                                         wgpu::TextureFormat format) {
-        wgpu::Texture texture =
-            CreateTexture(device, wgpu::TextureDimension::e2D, format, width, height, 1, 1);
-        return texture.CreateView();
+  private:
+    wgpu::CommandEncoder TestBeginRenderPass(const wgpu::RenderPassDescriptor* descriptor) {
+        wgpu::CommandEncoder commandEncoder = device.CreateCommandEncoder();
+        wgpu::RenderPassEncoder renderPassEncoder = commandEncoder.BeginRenderPass(descriptor);
+        renderPassEncoder.End();
+        return commandEncoder;
+    }
+};
+
+wgpu::Texture CreateTexture(wgpu::Device& device,
+                            wgpu::TextureDimension dimension,
+                            wgpu::TextureFormat format,
+                            uint32_t width,
+                            uint32_t height,
+                            uint32_t arrayLayerCount,
+                            uint32_t mipLevelCount,
+                            uint32_t sampleCount = 1,
+                            wgpu::TextureUsage usage = wgpu::TextureUsage::RenderAttachment) {
+    wgpu::TextureDescriptor descriptor;
+    descriptor.dimension = dimension;
+    descriptor.size.width = width;
+    descriptor.size.height = height;
+    descriptor.size.depthOrArrayLayers = arrayLayerCount;
+    descriptor.sampleCount = sampleCount;
+    descriptor.format = format;
+    descriptor.mipLevelCount = mipLevelCount;
+    descriptor.usage = usage;
+
+    return device.CreateTexture(&descriptor);
+}
+
+wgpu::TextureView Create2DAttachment(wgpu::Device& device,
+                                     uint32_t width,
+                                     uint32_t height,
+                                     wgpu::TextureFormat format) {
+    wgpu::Texture texture =
+        CreateTexture(device, wgpu::TextureDimension::e2D, format, width, height, 1, 1);
+    return texture.CreateView();
+}
+
+// Using BeginRenderPass with no attachments isn't valid
+TEST_F(RenderPassDescriptorValidationTest, Empty) {
+    utils::ComboRenderPassDescriptor renderPass({}, nullptr);
+    AssertBeginRenderPassError(&renderPass);
+}
+
+// A render pass with only one color or one depth attachment is ok
+TEST_F(RenderPassDescriptorValidationTest, OneAttachment) {
+    // One color attachment
+    {
+        wgpu::TextureView color = Create2DAttachment(device, 1, 1, wgpu::TextureFormat::RGBA8Unorm);
+        utils::ComboRenderPassDescriptor renderPass({color});
+
+        AssertBeginRenderPassSuccess(&renderPass);
+    }
+    // One depth-stencil attachment
+    {
+        wgpu::TextureView depthStencil =
+            Create2DAttachment(device, 1, 1, wgpu::TextureFormat::Depth24PlusStencil8);
+        utils::ComboRenderPassDescriptor renderPass({}, depthStencil);
+
+        AssertBeginRenderPassSuccess(&renderPass);
+    }
+}
+
+// Test OOB color attachment indices are handled
+TEST_F(RenderPassDescriptorValidationTest, ColorAttachmentOutOfBounds) {
+    std::array<wgpu::RenderPassColorAttachment, kMaxColorAttachments + 1> colorAttachments;
+    for (uint32_t i = 0; i < colorAttachments.size(); i++) {
+        colorAttachments[i].view =
+            Create2DAttachment(device, 1, 1, wgpu::TextureFormat::RGBA8Unorm);
+        colorAttachments[i].resolveTarget = nullptr;
+        colorAttachments[i].clearValue = {0.0f, 0.0f, 0.0f, 0.0f};
+        colorAttachments[i].loadOp = wgpu::LoadOp::Clear;
+        colorAttachments[i].storeOp = wgpu::StoreOp::Store;
     }
 
-    // Using BeginRenderPass with no attachments isn't valid
-    TEST_F(RenderPassDescriptorValidationTest, Empty) {
-        utils::ComboRenderPassDescriptor renderPass({}, nullptr);
+    // Control case: kMaxColorAttachments is valid.
+    {
+        wgpu::RenderPassDescriptor renderPass;
+        renderPass.colorAttachmentCount = kMaxColorAttachments;
+        renderPass.colorAttachments = colorAttachments.data();
+        renderPass.depthStencilAttachment = nullptr;
+        AssertBeginRenderPassSuccess(&renderPass);
+    }
+
+    // Error case: kMaxColorAttachments + 1 is an error.
+    {
+        wgpu::RenderPassDescriptor renderPass;
+        renderPass.colorAttachmentCount = kMaxColorAttachments + 1;
+        renderPass.colorAttachments = colorAttachments.data();
+        renderPass.depthStencilAttachment = nullptr;
         AssertBeginRenderPassError(&renderPass);
     }
+}
 
-    // A render pass with only one color or one depth attachment is ok
-    TEST_F(RenderPassDescriptorValidationTest, OneAttachment) {
-        // One color attachment
-        {
-            wgpu::TextureView color =
-                Create2DAttachment(device, 1, 1, wgpu::TextureFormat::RGBA8Unorm);
-            utils::ComboRenderPassDescriptor renderPass({color});
+// Test sparse color attachment validations
+TEST_F(RenderPassDescriptorValidationTest, SparseColorAttachment) {
+    // Having sparse color attachment is valid.
+    {
+        std::array<wgpu::RenderPassColorAttachment, 2> colorAttachments;
+        colorAttachments[0].view = nullptr;
 
-            AssertBeginRenderPassSuccess(&renderPass);
-        }
-        // One depth-stencil attachment
+        colorAttachments[1].view =
+            Create2DAttachment(device, 1, 1, wgpu::TextureFormat::RGBA8Unorm);
+        colorAttachments[1].loadOp = wgpu::LoadOp::Load;
+        colorAttachments[1].storeOp = wgpu::StoreOp::Store;
+
+        wgpu::RenderPassDescriptor renderPass;
+        renderPass.colorAttachmentCount = colorAttachments.size();
+        renderPass.colorAttachments = colorAttachments.data();
+        renderPass.depthStencilAttachment = nullptr;
+        AssertBeginRenderPassSuccess(&renderPass);
+    }
+
+    // When all color attachments are null
+    {
+        std::array<wgpu::RenderPassColorAttachment, 2> colorAttachments;
+        colorAttachments[0].view = nullptr;
+        colorAttachments[1].view = nullptr;
+
+        // Control case: depth stencil attachment is not null is valid.
         {
-            wgpu::TextureView depthStencil =
+            wgpu::TextureView depthStencilView =
                 Create2DAttachment(device, 1, 1, wgpu::TextureFormat::Depth24PlusStencil8);
-            utils::ComboRenderPassDescriptor renderPass({}, depthStencil);
-
-            AssertBeginRenderPassSuccess(&renderPass);
-        }
-    }
-
-    // Test OOB color attachment indices are handled
-    TEST_F(RenderPassDescriptorValidationTest, ColorAttachmentOutOfBounds) {
-        std::array<wgpu::RenderPassColorAttachment, kMaxColorAttachments + 1> colorAttachments;
-        for (uint32_t i = 0; i < colorAttachments.size(); i++) {
-            colorAttachments[i].view =
-                Create2DAttachment(device, 1, 1, wgpu::TextureFormat::RGBA8Unorm);
-            colorAttachments[i].resolveTarget = nullptr;
-            colorAttachments[i].clearValue = {0.0f, 0.0f, 0.0f, 0.0f};
-            colorAttachments[i].loadOp = wgpu::LoadOp::Clear;
-            colorAttachments[i].storeOp = wgpu::StoreOp::Store;
-        }
-
-        // Control case: kMaxColorAttachments is valid.
-        {
-            wgpu::RenderPassDescriptor renderPass;
-            renderPass.colorAttachmentCount = kMaxColorAttachments;
-            renderPass.colorAttachments = colorAttachments.data();
-            renderPass.depthStencilAttachment = nullptr;
-            AssertBeginRenderPassSuccess(&renderPass);
-        }
-
-        // Error case: kMaxColorAttachments + 1 is an error.
-        {
-            wgpu::RenderPassDescriptor renderPass;
-            renderPass.colorAttachmentCount = kMaxColorAttachments + 1;
-            renderPass.colorAttachments = colorAttachments.data();
-            renderPass.depthStencilAttachment = nullptr;
-            AssertBeginRenderPassError(&renderPass);
-        }
-    }
-
-    // Test sparse color attachment validations
-    TEST_F(RenderPassDescriptorValidationTest, SparseColorAttachment) {
-        // Having sparse color attachment is valid.
-        {
-            std::array<wgpu::RenderPassColorAttachment, 2> colorAttachments;
-            colorAttachments[0].view = nullptr;
-
-            colorAttachments[1].view =
-                Create2DAttachment(device, 1, 1, wgpu::TextureFormat::RGBA8Unorm);
-            colorAttachments[1].loadOp = wgpu::LoadOp::Load;
-            colorAttachments[1].storeOp = wgpu::StoreOp::Store;
+            wgpu::RenderPassDepthStencilAttachment depthStencilAttachment;
+            depthStencilAttachment.view = depthStencilView;
+            depthStencilAttachment.depthClearValue = 1.0f;
+            depthStencilAttachment.stencilClearValue = 0;
+            depthStencilAttachment.depthLoadOp = wgpu::LoadOp::Clear;
+            depthStencilAttachment.depthStoreOp = wgpu::StoreOp::Store;
+            depthStencilAttachment.stencilLoadOp = wgpu::LoadOp::Clear;
+            depthStencilAttachment.stencilStoreOp = wgpu::StoreOp::Store;
 
             wgpu::RenderPassDescriptor renderPass;
             renderPass.colorAttachmentCount = colorAttachments.size();
             renderPass.colorAttachments = colorAttachments.data();
+            renderPass.depthStencilAttachment = &depthStencilAttachment;
+            AssertBeginRenderPassSuccess(&renderPass);
+        }
+
+        // Error case: depth stencil attachment being null is invalid.
+        {
+            wgpu::RenderPassDescriptor renderPass;
+            renderPass.colorAttachmentCount = colorAttachments.size();
+            renderPass.colorAttachments = colorAttachments.data();
             renderPass.depthStencilAttachment = nullptr;
-            AssertBeginRenderPassSuccess(&renderPass);
-        }
-
-        // When all color attachments are null
-        {
-            std::array<wgpu::RenderPassColorAttachment, 2> colorAttachments;
-            colorAttachments[0].view = nullptr;
-            colorAttachments[1].view = nullptr;
-
-            // Control case: depth stencil attachment is not null is valid.
-            {
-                wgpu::TextureView depthStencilView =
-                    Create2DAttachment(device, 1, 1, wgpu::TextureFormat::Depth24PlusStencil8);
-                wgpu::RenderPassDepthStencilAttachment depthStencilAttachment;
-                depthStencilAttachment.view = depthStencilView;
-                depthStencilAttachment.depthClearValue = 1.0f;
-                depthStencilAttachment.stencilClearValue = 0;
-                depthStencilAttachment.depthLoadOp = wgpu::LoadOp::Clear;
-                depthStencilAttachment.depthStoreOp = wgpu::StoreOp::Store;
-                depthStencilAttachment.stencilLoadOp = wgpu::LoadOp::Clear;
-                depthStencilAttachment.stencilStoreOp = wgpu::StoreOp::Store;
-
-                wgpu::RenderPassDescriptor renderPass;
-                renderPass.colorAttachmentCount = colorAttachments.size();
-                renderPass.colorAttachments = colorAttachments.data();
-                renderPass.depthStencilAttachment = &depthStencilAttachment;
-                AssertBeginRenderPassSuccess(&renderPass);
-            }
-
-            // Error case: depth stencil attachment being null is invalid.
-            {
-                wgpu::RenderPassDescriptor renderPass;
-                renderPass.colorAttachmentCount = colorAttachments.size();
-                renderPass.colorAttachments = colorAttachments.data();
-                renderPass.depthStencilAttachment = nullptr;
-                AssertBeginRenderPassError(&renderPass);
-            }
-        }
-    }
-
-    // Check that the render pass color attachment must have the RenderAttachment usage.
-    TEST_F(RenderPassDescriptorValidationTest, ColorAttachmentInvalidUsage) {
-        // Control case: using a texture with RenderAttachment is valid.
-        {
-            wgpu::TextureView renderView =
-                Create2DAttachment(device, 1, 1, wgpu::TextureFormat::RGBA8Unorm);
-            utils::ComboRenderPassDescriptor renderPass({renderView});
-            AssertBeginRenderPassSuccess(&renderPass);
-        }
-
-        // Error case: using a texture with Sampled is invalid.
-        {
-            wgpu::TextureDescriptor texDesc;
-            texDesc.usage = wgpu::TextureUsage::TextureBinding;
-            texDesc.size = {1, 1, 1};
-            texDesc.format = wgpu::TextureFormat::RGBA8Unorm;
-            wgpu::Texture sampledTex = device.CreateTexture(&texDesc);
-
-            utils::ComboRenderPassDescriptor renderPass({sampledTex.CreateView()});
             AssertBeginRenderPassError(&renderPass);
         }
     }
+}
 
-    // Attachments must have the same size
-    TEST_F(RenderPassDescriptorValidationTest, SizeMustMatch) {
-        wgpu::TextureView color1x1A =
+// Check that the render pass color attachment must have the RenderAttachment usage.
+TEST_F(RenderPassDescriptorValidationTest, ColorAttachmentInvalidUsage) {
+    // Control case: using a texture with RenderAttachment is valid.
+    {
+        wgpu::TextureView renderView =
             Create2DAttachment(device, 1, 1, wgpu::TextureFormat::RGBA8Unorm);
-        wgpu::TextureView color1x1B =
-            Create2DAttachment(device, 1, 1, wgpu::TextureFormat::RGBA8Unorm);
-        wgpu::TextureView color2x2 =
-            Create2DAttachment(device, 2, 2, wgpu::TextureFormat::RGBA8Unorm);
-
-        wgpu::TextureView depthStencil1x1 =
-            Create2DAttachment(device, 1, 1, wgpu::TextureFormat::Depth24PlusStencil8);
-        wgpu::TextureView depthStencil2x2 =
-            Create2DAttachment(device, 2, 2, wgpu::TextureFormat::Depth24PlusStencil8);
-
-        // Control case: all the same size (1x1)
-        {
-            utils::ComboRenderPassDescriptor renderPass({color1x1A, color1x1B}, depthStencil1x1);
-            AssertBeginRenderPassSuccess(&renderPass);
-        }
-
-        // One of the color attachments has a different size
-        {
-            utils::ComboRenderPassDescriptor renderPass({color1x1A, color2x2});
-            AssertBeginRenderPassError(&renderPass);
-        }
-
-        // The depth stencil attachment has a different size
-        {
-            utils::ComboRenderPassDescriptor renderPass({color1x1A, color1x1B}, depthStencil2x2);
-            AssertBeginRenderPassError(&renderPass);
-        }
-    }
-
-    // Attachments formats must match whether they are used for color or depth-stencil
-    TEST_F(RenderPassDescriptorValidationTest, FormatMismatch) {
-        wgpu::TextureView color = Create2DAttachment(device, 1, 1, wgpu::TextureFormat::RGBA8Unorm);
-        wgpu::TextureView depthStencil =
-            Create2DAttachment(device, 1, 1, wgpu::TextureFormat::Depth24PlusStencil8);
-
-        // Using depth-stencil for color
-        {
-            utils::ComboRenderPassDescriptor renderPass({depthStencil});
-            AssertBeginRenderPassError(&renderPass);
-        }
-
-        // Using color for depth-stencil
-        {
-            utils::ComboRenderPassDescriptor renderPass({}, color);
-            AssertBeginRenderPassError(&renderPass);
-        }
-    }
-
-    // Depth and stencil storeOps can be different
-    TEST_F(RenderPassDescriptorValidationTest, DepthStencilStoreOpMismatch) {
-        constexpr uint32_t kArrayLayers = 1;
-        constexpr uint32_t kLevelCount = 1;
-        constexpr uint32_t kSize = 32;
-        constexpr wgpu::TextureFormat kColorFormat = wgpu::TextureFormat::RGBA8Unorm;
-        constexpr wgpu::TextureFormat kDepthStencilFormat =
-            wgpu::TextureFormat::Depth24PlusStencil8;
-
-        wgpu::Texture colorTexture =
-            CreateTexture(device, wgpu::TextureDimension::e2D, kColorFormat, kSize, kSize,
-                          kArrayLayers, kLevelCount);
-        wgpu::Texture depthStencilTexture =
-            CreateTexture(device, wgpu::TextureDimension::e2D, kDepthStencilFormat, kSize, kSize,
-                          kArrayLayers, kLevelCount);
-
-        wgpu::TextureViewDescriptor descriptor;
-        descriptor.dimension = wgpu::TextureViewDimension::e2D;
-        descriptor.baseArrayLayer = 0;
-        descriptor.arrayLayerCount = kArrayLayers;
-        descriptor.baseMipLevel = 0;
-        descriptor.mipLevelCount = kLevelCount;
-        wgpu::TextureView colorTextureView = colorTexture.CreateView(&descriptor);
-        wgpu::TextureView depthStencilView = depthStencilTexture.CreateView(&descriptor);
-
-        // Base case: StoreOps match so render pass is a success
-        {
-            utils::ComboRenderPassDescriptor renderPass({}, depthStencilView);
-            renderPass.cDepthStencilAttachmentInfo.stencilStoreOp = wgpu::StoreOp::Store;
-            renderPass.cDepthStencilAttachmentInfo.depthStoreOp = wgpu::StoreOp::Store;
-            AssertBeginRenderPassSuccess(&renderPass);
-        }
-
-        // Base case: StoreOps match so render pass is a success
-        {
-            utils::ComboRenderPassDescriptor renderPass({}, depthStencilView);
-            renderPass.cDepthStencilAttachmentInfo.stencilStoreOp = wgpu::StoreOp::Discard;
-            renderPass.cDepthStencilAttachmentInfo.depthStoreOp = wgpu::StoreOp::Discard;
-            AssertBeginRenderPassSuccess(&renderPass);
-        }
-
-        // StoreOps mismatch still is a success
-        {
-            utils::ComboRenderPassDescriptor renderPass({}, depthStencilView);
-            renderPass.cDepthStencilAttachmentInfo.stencilStoreOp = wgpu::StoreOp::Store;
-            renderPass.cDepthStencilAttachmentInfo.depthStoreOp = wgpu::StoreOp::Discard;
-            AssertBeginRenderPassSuccess(&renderPass);
-        }
-    }
-
-    // Currently only texture views with arrayLayerCount == 1 are allowed to be color and depth
-    // stencil attachments
-    TEST_F(RenderPassDescriptorValidationTest, TextureViewLayerCountForColorAndDepthStencil) {
-        constexpr uint32_t kLevelCount = 1;
-        constexpr uint32_t kSize = 32;
-        constexpr wgpu::TextureFormat kColorFormat = wgpu::TextureFormat::RGBA8Unorm;
-        constexpr wgpu::TextureFormat kDepthStencilFormat =
-            wgpu::TextureFormat::Depth24PlusStencil8;
-
-        constexpr uint32_t kArrayLayers = 10;
-
-        wgpu::Texture colorTexture =
-            CreateTexture(device, wgpu::TextureDimension::e2D, kColorFormat, kSize, kSize,
-                          kArrayLayers, kLevelCount);
-        wgpu::Texture depthStencilTexture =
-            CreateTexture(device, wgpu::TextureDimension::e2D, kDepthStencilFormat, kSize, kSize,
-                          kArrayLayers, kLevelCount);
-
-        wgpu::TextureViewDescriptor baseDescriptor;
-        baseDescriptor.dimension = wgpu::TextureViewDimension::e2DArray;
-        baseDescriptor.baseArrayLayer = 0;
-        baseDescriptor.arrayLayerCount = kArrayLayers;
-        baseDescriptor.baseMipLevel = 0;
-        baseDescriptor.mipLevelCount = kLevelCount;
-
-        // Using 2D array texture view with arrayLayerCount > 1 is not allowed for color
-        {
-            wgpu::TextureViewDescriptor descriptor = baseDescriptor;
-            descriptor.format = kColorFormat;
-            descriptor.arrayLayerCount = 5;
-
-            wgpu::TextureView colorTextureView = colorTexture.CreateView(&descriptor);
-            utils::ComboRenderPassDescriptor renderPass({colorTextureView});
-            AssertBeginRenderPassError(&renderPass);
-        }
-
-        // Using 2D array texture view with arrayLayerCount > 1 is not allowed for depth stencil
-        {
-            wgpu::TextureViewDescriptor descriptor = baseDescriptor;
-            descriptor.format = kDepthStencilFormat;
-            descriptor.arrayLayerCount = 5;
-
-            wgpu::TextureView depthStencilView = depthStencilTexture.CreateView(&descriptor);
-            utils::ComboRenderPassDescriptor renderPass({}, depthStencilView);
-            AssertBeginRenderPassError(&renderPass);
-        }
-
-        // Using 2D array texture view that covers the first layer of the texture is OK for color
-        {
-            wgpu::TextureViewDescriptor descriptor = baseDescriptor;
-            descriptor.format = kColorFormat;
-            descriptor.baseArrayLayer = 0;
-            descriptor.arrayLayerCount = 1;
-
-            wgpu::TextureView colorTextureView = colorTexture.CreateView(&descriptor);
-            utils::ComboRenderPassDescriptor renderPass({colorTextureView});
-            AssertBeginRenderPassSuccess(&renderPass);
-        }
-
-        // Using 2D array texture view that covers the first layer is OK for depth stencil
-        {
-            wgpu::TextureViewDescriptor descriptor = baseDescriptor;
-            descriptor.format = kDepthStencilFormat;
-            descriptor.baseArrayLayer = 0;
-            descriptor.arrayLayerCount = 1;
-
-            wgpu::TextureView depthStencilView = depthStencilTexture.CreateView(&descriptor);
-            utils::ComboRenderPassDescriptor renderPass({}, depthStencilView);
-            AssertBeginRenderPassSuccess(&renderPass);
-        }
-
-        // Using 2D array texture view that covers the last layer is OK for color
-        {
-            wgpu::TextureViewDescriptor descriptor = baseDescriptor;
-            descriptor.format = kColorFormat;
-            descriptor.baseArrayLayer = kArrayLayers - 1;
-            descriptor.arrayLayerCount = 1;
-
-            wgpu::TextureView colorTextureView = colorTexture.CreateView(&descriptor);
-            utils::ComboRenderPassDescriptor renderPass({colorTextureView});
-            AssertBeginRenderPassSuccess(&renderPass);
-        }
-
-        // Using 2D array texture view that covers the last layer is OK for depth stencil
-        {
-            wgpu::TextureViewDescriptor descriptor = baseDescriptor;
-            descriptor.format = kDepthStencilFormat;
-            descriptor.baseArrayLayer = kArrayLayers - 1;
-            descriptor.arrayLayerCount = 1;
-
-            wgpu::TextureView depthStencilView = depthStencilTexture.CreateView(&descriptor);
-            utils::ComboRenderPassDescriptor renderPass({}, depthStencilView);
-            AssertBeginRenderPassSuccess(&renderPass);
-        }
-    }
-
-    // Check that the render pass depth attachment must have the RenderAttachment usage.
-    TEST_F(RenderPassDescriptorValidationTest, DepthAttachmentInvalidUsage) {
-        // Control case: using a texture with RenderAttachment is valid.
-        {
-            wgpu::TextureView renderView =
-                Create2DAttachment(device, 1, 1, wgpu::TextureFormat::Depth32Float);
-            utils::ComboRenderPassDescriptor renderPass({}, renderView);
-            renderPass.cDepthStencilAttachmentInfo.stencilLoadOp = wgpu::LoadOp::Undefined;
-            renderPass.cDepthStencilAttachmentInfo.stencilStoreOp = wgpu::StoreOp::Undefined;
-
-            AssertBeginRenderPassSuccess(&renderPass);
-        }
-
-        // Error case: using a texture with Sampled is invalid.
-        {
-            wgpu::TextureDescriptor texDesc;
-            texDesc.usage = wgpu::TextureUsage::TextureBinding;
-            texDesc.size = {1, 1, 1};
-            texDesc.format = wgpu::TextureFormat::Depth32Float;
-            wgpu::Texture sampledTex = device.CreateTexture(&texDesc);
-            wgpu::TextureView sampledView = sampledTex.CreateView();
-
-            utils::ComboRenderPassDescriptor renderPass({}, sampledView);
-            renderPass.cDepthStencilAttachmentInfo.stencilLoadOp = wgpu::LoadOp::Undefined;
-            renderPass.cDepthStencilAttachmentInfo.stencilStoreOp = wgpu::StoreOp::Undefined;
-
-            AssertBeginRenderPassError(&renderPass);
-        }
-    }
-
-    // Only 2D texture views with mipLevelCount == 1 are allowed to be color attachments
-    TEST_F(RenderPassDescriptorValidationTest, TextureViewLevelCountForColorAndDepthStencil) {
-        constexpr uint32_t kArrayLayers = 1;
-        constexpr uint32_t kSize = 32;
-        constexpr wgpu::TextureFormat kColorFormat = wgpu::TextureFormat::RGBA8Unorm;
-        constexpr wgpu::TextureFormat kDepthStencilFormat =
-            wgpu::TextureFormat::Depth24PlusStencil8;
-
-        constexpr uint32_t kLevelCount = 4;
-
-        wgpu::Texture colorTexture =
-            CreateTexture(device, wgpu::TextureDimension::e2D, kColorFormat, kSize, kSize,
-                          kArrayLayers, kLevelCount);
-        wgpu::Texture depthStencilTexture =
-            CreateTexture(device, wgpu::TextureDimension::e2D, kDepthStencilFormat, kSize, kSize,
-                          kArrayLayers, kLevelCount);
-
-        wgpu::TextureViewDescriptor baseDescriptor;
-        baseDescriptor.dimension = wgpu::TextureViewDimension::e2D;
-        baseDescriptor.baseArrayLayer = 0;
-        baseDescriptor.arrayLayerCount = kArrayLayers;
-        baseDescriptor.baseMipLevel = 0;
-        baseDescriptor.mipLevelCount = kLevelCount;
-
-        // Using 2D texture view with mipLevelCount > 1 is not allowed for color
-        {
-            wgpu::TextureViewDescriptor descriptor = baseDescriptor;
-            descriptor.format = kColorFormat;
-            descriptor.mipLevelCount = 2;
-
-            wgpu::TextureView colorTextureView = colorTexture.CreateView(&descriptor);
-            utils::ComboRenderPassDescriptor renderPass({colorTextureView});
-            AssertBeginRenderPassError(&renderPass);
-        }
-
-        // Using 2D texture view with mipLevelCount > 1 is not allowed for depth stencil
-        {
-            wgpu::TextureViewDescriptor descriptor = baseDescriptor;
-            descriptor.format = kDepthStencilFormat;
-            descriptor.mipLevelCount = 2;
-
-            wgpu::TextureView depthStencilView = depthStencilTexture.CreateView(&descriptor);
-            utils::ComboRenderPassDescriptor renderPass({}, depthStencilView);
-            AssertBeginRenderPassError(&renderPass);
-        }
-
-        // Using 2D texture view that covers the first level of the texture is OK for color
-        {
-            wgpu::TextureViewDescriptor descriptor = baseDescriptor;
-            descriptor.format = kColorFormat;
-            descriptor.baseMipLevel = 0;
-            descriptor.mipLevelCount = 1;
-
-            wgpu::TextureView colorTextureView = colorTexture.CreateView(&descriptor);
-            utils::ComboRenderPassDescriptor renderPass({colorTextureView});
-            AssertBeginRenderPassSuccess(&renderPass);
-        }
-
-        // Using 2D texture view that covers the first level is OK for depth stencil
-        {
-            wgpu::TextureViewDescriptor descriptor = baseDescriptor;
-            descriptor.format = kDepthStencilFormat;
-            descriptor.baseMipLevel = 0;
-            descriptor.mipLevelCount = 1;
-
-            wgpu::TextureView depthStencilView = depthStencilTexture.CreateView(&descriptor);
-            utils::ComboRenderPassDescriptor renderPass({}, depthStencilView);
-            AssertBeginRenderPassSuccess(&renderPass);
-        }
-
-        // Using 2D texture view that covers the last level is OK for color
-        {
-            wgpu::TextureViewDescriptor descriptor = baseDescriptor;
-            descriptor.format = kColorFormat;
-            descriptor.baseMipLevel = kLevelCount - 1;
-            descriptor.mipLevelCount = 1;
-
-            wgpu::TextureView colorTextureView = colorTexture.CreateView(&descriptor);
-            utils::ComboRenderPassDescriptor renderPass({colorTextureView});
-            AssertBeginRenderPassSuccess(&renderPass);
-        }
-
-        // Using 2D texture view that covers the last level is OK for depth stencil
-        {
-            wgpu::TextureViewDescriptor descriptor = baseDescriptor;
-            descriptor.format = kDepthStencilFormat;
-            descriptor.baseMipLevel = kLevelCount - 1;
-            descriptor.mipLevelCount = 1;
-
-            wgpu::TextureView depthStencilView = depthStencilTexture.CreateView(&descriptor);
-            utils::ComboRenderPassDescriptor renderPass({}, depthStencilView);
-            AssertBeginRenderPassSuccess(&renderPass);
-        }
-    }
-
-    // It is not allowed to set resolve target when the color attachment is non-multisampled.
-    TEST_F(RenderPassDescriptorValidationTest, NonMultisampledColorWithResolveTarget) {
-        static constexpr uint32_t kArrayLayers = 1;
-        static constexpr uint32_t kLevelCount = 1;
-        static constexpr uint32_t kSize = 32;
-        static constexpr uint32_t kSampleCount = 1;
-        static constexpr wgpu::TextureFormat kColorFormat = wgpu::TextureFormat::RGBA8Unorm;
-
-        wgpu::Texture colorTexture =
-            CreateTexture(device, wgpu::TextureDimension::e2D, kColorFormat, kSize, kSize,
-                          kArrayLayers, kLevelCount, kSampleCount);
-        wgpu::Texture resolveTargetTexture =
-            CreateTexture(device, wgpu::TextureDimension::e2D, kColorFormat, kSize, kSize,
-                          kArrayLayers, kLevelCount, kSampleCount);
-        wgpu::TextureView colorTextureView = colorTexture.CreateView();
-        wgpu::TextureView resolveTargetTextureView = resolveTargetTexture.CreateView();
-
-        utils::ComboRenderPassDescriptor renderPass({colorTextureView});
-        renderPass.cColorAttachments[0].resolveTarget = resolveTargetTextureView;
-        AssertBeginRenderPassError(&renderPass);
-    }
-
-    class MultisampledRenderPassDescriptorValidationTest
-        : public RenderPassDescriptorValidationTest {
-      public:
-        utils::ComboRenderPassDescriptor CreateMultisampledRenderPass() {
-            return utils::ComboRenderPassDescriptor({CreateMultisampledColorTextureView()});
-        }
-
-        wgpu::TextureView CreateMultisampledColorTextureView() {
-            return CreateColorTextureView(kSampleCount);
-        }
-
-        wgpu::TextureView CreateNonMultisampledColorTextureView() {
-            return CreateColorTextureView(1);
-        }
-
-        static constexpr uint32_t kArrayLayers = 1;
-        static constexpr uint32_t kLevelCount = 1;
-        static constexpr uint32_t kSize = 32;
-        static constexpr uint32_t kSampleCount = 4;
-        static constexpr wgpu::TextureFormat kColorFormat = wgpu::TextureFormat::RGBA8Unorm;
-
-      private:
-        wgpu::TextureView CreateColorTextureView(uint32_t sampleCount) {
-            wgpu::Texture colorTexture =
-                CreateTexture(device, wgpu::TextureDimension::e2D, kColorFormat, kSize, kSize,
-                              kArrayLayers, kLevelCount, sampleCount);
-
-            return colorTexture.CreateView();
-        }
-    };
-
-    // Tests on the use of multisampled textures as color attachments
-    TEST_F(MultisampledRenderPassDescriptorValidationTest, MultisampledColorAttachments) {
-        wgpu::TextureView colorTextureView = CreateNonMultisampledColorTextureView();
-        wgpu::TextureView resolveTargetTextureView = CreateNonMultisampledColorTextureView();
-        wgpu::TextureView multisampledColorTextureView = CreateMultisampledColorTextureView();
-
-        // It is allowed to use a multisampled color attachment without setting resolve target.
-        {
-            utils::ComboRenderPassDescriptor renderPass = CreateMultisampledRenderPass();
-            AssertBeginRenderPassSuccess(&renderPass);
-        }
-
-        // It is not allowed to use multiple color attachments with different sample counts.
-        {
-            utils::ComboRenderPassDescriptor renderPass(
-                {multisampledColorTextureView, colorTextureView});
-            AssertBeginRenderPassError(&renderPass);
-        }
-    }
-
-    // It is not allowed to use a multisampled resolve target.
-    TEST_F(MultisampledRenderPassDescriptorValidationTest, MultisampledResolveTarget) {
-        wgpu::TextureView multisampledResolveTargetView = CreateMultisampledColorTextureView();
-
-        utils::ComboRenderPassDescriptor renderPass = CreateMultisampledRenderPass();
-        renderPass.cColorAttachments[0].resolveTarget = multisampledResolveTargetView;
-        AssertBeginRenderPassError(&renderPass);
-    }
-
-    // It is not allowed to use a resolve target with array layer count > 1.
-    TEST_F(MultisampledRenderPassDescriptorValidationTest, ResolveTargetArrayLayerMoreThanOne) {
-        constexpr uint32_t kArrayLayers2 = 2;
-        wgpu::Texture resolveTexture =
-            CreateTexture(device, wgpu::TextureDimension::e2D, kColorFormat, kSize, kSize,
-                          kArrayLayers2, kLevelCount);
-        wgpu::TextureViewDescriptor viewDesc;
-        viewDesc.dimension = wgpu::TextureViewDimension::e2DArray;
-        wgpu::TextureView resolveTextureView = resolveTexture.CreateView(&viewDesc);
-
-        utils::ComboRenderPassDescriptor renderPass = CreateMultisampledRenderPass();
-        renderPass.cColorAttachments[0].resolveTarget = resolveTextureView;
-        AssertBeginRenderPassError(&renderPass);
-    }
-
-    // It is not allowed to use a resolve target with mipmap level count > 1.
-    TEST_F(MultisampledRenderPassDescriptorValidationTest, ResolveTargetMipmapLevelMoreThanOne) {
-        constexpr uint32_t kLevelCount2 = 2;
-        wgpu::Texture resolveTexture =
-            CreateTexture(device, wgpu::TextureDimension::e2D, kColorFormat, kSize, kSize,
-                          kArrayLayers, kLevelCount2);
-        wgpu::TextureView resolveTextureView = resolveTexture.CreateView();
-
-        utils::ComboRenderPassDescriptor renderPass = CreateMultisampledRenderPass();
-        renderPass.cColorAttachments[0].resolveTarget = resolveTextureView;
-        AssertBeginRenderPassError(&renderPass);
-    }
-
-    // It is not allowed to use a resolve target which is created from a texture whose usage does
-    // not include wgpu::TextureUsage::RenderAttachment.
-    TEST_F(MultisampledRenderPassDescriptorValidationTest, ResolveTargetUsageNoRenderAttachment) {
-        constexpr wgpu::TextureUsage kUsage =
-            wgpu::TextureUsage::CopyDst | wgpu::TextureUsage::CopySrc;
-        wgpu::Texture nonColorUsageResolveTexture =
-            CreateTexture(device, wgpu::TextureDimension::e2D, kColorFormat, kSize, kSize,
-                          kArrayLayers, kLevelCount, 1, kUsage);
-        wgpu::TextureView nonColorUsageResolveTextureView =
-            nonColorUsageResolveTexture.CreateView();
-
-        utils::ComboRenderPassDescriptor renderPass = CreateMultisampledRenderPass();
-        renderPass.cColorAttachments[0].resolveTarget = nonColorUsageResolveTextureView;
-        AssertBeginRenderPassError(&renderPass);
-    }
-
-    // It is not allowed to use a resolve target which is in error state.
-    TEST_F(MultisampledRenderPassDescriptorValidationTest, ResolveTargetInErrorState) {
-        wgpu::Texture resolveTexture =
-            CreateTexture(device, wgpu::TextureDimension::e2D, kColorFormat, kSize, kSize,
-                          kArrayLayers, kLevelCount);
-        wgpu::TextureViewDescriptor errorTextureView;
-        errorTextureView.dimension = wgpu::TextureViewDimension::e2D;
-        errorTextureView.format = kColorFormat;
-        errorTextureView.baseArrayLayer = kArrayLayers + 1;
-        ASSERT_DEVICE_ERROR(wgpu::TextureView errorResolveTarget =
-                                resolveTexture.CreateView(&errorTextureView));
-
-        utils::ComboRenderPassDescriptor renderPass = CreateMultisampledRenderPass();
-        renderPass.cColorAttachments[0].resolveTarget = errorResolveTarget;
-        AssertBeginRenderPassError(&renderPass);
-    }
-
-    // It is allowed to use a multisampled color attachment and a non-multisampled resolve target.
-    TEST_F(MultisampledRenderPassDescriptorValidationTest, MultisampledColorWithResolveTarget) {
-        wgpu::TextureView resolveTargetTextureView = CreateNonMultisampledColorTextureView();
-
-        utils::ComboRenderPassDescriptor renderPass = CreateMultisampledRenderPass();
-        renderPass.cColorAttachments[0].resolveTarget = resolveTargetTextureView;
+        utils::ComboRenderPassDescriptor renderPass({renderView});
         AssertBeginRenderPassSuccess(&renderPass);
     }
 
-    // It is not allowed to use a resolve target in a format different from the color attachment.
-    TEST_F(MultisampledRenderPassDescriptorValidationTest, ResolveTargetDifferentFormat) {
-        constexpr wgpu::TextureFormat kColorFormat2 = wgpu::TextureFormat::BGRA8Unorm;
-        wgpu::Texture resolveTexture =
-            CreateTexture(device, wgpu::TextureDimension::e2D, kColorFormat2, kSize, kSize,
-                          kArrayLayers, kLevelCount);
-        wgpu::TextureView resolveTextureView = resolveTexture.CreateView();
+    // Error case: using a texture with Sampled is invalid.
+    {
+        wgpu::TextureDescriptor texDesc;
+        texDesc.usage = wgpu::TextureUsage::TextureBinding;
+        texDesc.size = {1, 1, 1};
+        texDesc.format = wgpu::TextureFormat::RGBA8Unorm;
+        wgpu::Texture sampledTex = device.CreateTexture(&texDesc);
+
+        utils::ComboRenderPassDescriptor renderPass({sampledTex.CreateView()});
+        AssertBeginRenderPassError(&renderPass);
+    }
+}
+
+// Attachments must have the same size
+TEST_F(RenderPassDescriptorValidationTest, SizeMustMatch) {
+    wgpu::TextureView color1x1A = Create2DAttachment(device, 1, 1, wgpu::TextureFormat::RGBA8Unorm);
+    wgpu::TextureView color1x1B = Create2DAttachment(device, 1, 1, wgpu::TextureFormat::RGBA8Unorm);
+    wgpu::TextureView color2x2 = Create2DAttachment(device, 2, 2, wgpu::TextureFormat::RGBA8Unorm);
+
+    wgpu::TextureView depthStencil1x1 =
+        Create2DAttachment(device, 1, 1, wgpu::TextureFormat::Depth24PlusStencil8);
+    wgpu::TextureView depthStencil2x2 =
+        Create2DAttachment(device, 2, 2, wgpu::TextureFormat::Depth24PlusStencil8);
+
+    // Control case: all the same size (1x1)
+    {
+        utils::ComboRenderPassDescriptor renderPass({color1x1A, color1x1B}, depthStencil1x1);
+        AssertBeginRenderPassSuccess(&renderPass);
+    }
+
+    // One of the color attachments has a different size
+    {
+        utils::ComboRenderPassDescriptor renderPass({color1x1A, color2x2});
+        AssertBeginRenderPassError(&renderPass);
+    }
+
+    // The depth stencil attachment has a different size
+    {
+        utils::ComboRenderPassDescriptor renderPass({color1x1A, color1x1B}, depthStencil2x2);
+        AssertBeginRenderPassError(&renderPass);
+    }
+}
+
+// Attachments formats must match whether they are used for color or depth-stencil
+TEST_F(RenderPassDescriptorValidationTest, FormatMismatch) {
+    wgpu::TextureView color = Create2DAttachment(device, 1, 1, wgpu::TextureFormat::RGBA8Unorm);
+    wgpu::TextureView depthStencil =
+        Create2DAttachment(device, 1, 1, wgpu::TextureFormat::Depth24PlusStencil8);
+
+    // Using depth-stencil for color
+    {
+        utils::ComboRenderPassDescriptor renderPass({depthStencil});
+        AssertBeginRenderPassError(&renderPass);
+    }
+
+    // Using color for depth-stencil
+    {
+        utils::ComboRenderPassDescriptor renderPass({}, color);
+        AssertBeginRenderPassError(&renderPass);
+    }
+}
+
+// Depth and stencil storeOps can be different
+TEST_F(RenderPassDescriptorValidationTest, DepthStencilStoreOpMismatch) {
+    constexpr uint32_t kArrayLayers = 1;
+    constexpr uint32_t kLevelCount = 1;
+    constexpr uint32_t kSize = 32;
+    constexpr wgpu::TextureFormat kColorFormat = wgpu::TextureFormat::RGBA8Unorm;
+    constexpr wgpu::TextureFormat kDepthStencilFormat = wgpu::TextureFormat::Depth24PlusStencil8;
+
+    wgpu::Texture colorTexture = CreateTexture(device, wgpu::TextureDimension::e2D, kColorFormat,
+                                               kSize, kSize, kArrayLayers, kLevelCount);
+    wgpu::Texture depthStencilTexture =
+        CreateTexture(device, wgpu::TextureDimension::e2D, kDepthStencilFormat, kSize, kSize,
+                      kArrayLayers, kLevelCount);
+
+    wgpu::TextureViewDescriptor descriptor;
+    descriptor.dimension = wgpu::TextureViewDimension::e2D;
+    descriptor.baseArrayLayer = 0;
+    descriptor.arrayLayerCount = kArrayLayers;
+    descriptor.baseMipLevel = 0;
+    descriptor.mipLevelCount = kLevelCount;
+    wgpu::TextureView colorTextureView = colorTexture.CreateView(&descriptor);
+    wgpu::TextureView depthStencilView = depthStencilTexture.CreateView(&descriptor);
+
+    // Base case: StoreOps match so render pass is a success
+    {
+        utils::ComboRenderPassDescriptor renderPass({}, depthStencilView);
+        renderPass.cDepthStencilAttachmentInfo.stencilStoreOp = wgpu::StoreOp::Store;
+        renderPass.cDepthStencilAttachmentInfo.depthStoreOp = wgpu::StoreOp::Store;
+        AssertBeginRenderPassSuccess(&renderPass);
+    }
+
+    // Base case: StoreOps match (both Discard) so render pass is a success
+    {
+        utils::ComboRenderPassDescriptor renderPass({}, depthStencilView);
+        renderPass.cDepthStencilAttachmentInfo.stencilStoreOp = wgpu::StoreOp::Discard;
+        renderPass.cDepthStencilAttachmentInfo.depthStoreOp = wgpu::StoreOp::Discard;
+        AssertBeginRenderPassSuccess(&renderPass);
+    }
+
+    // StoreOps mismatch still is a success
+    {
+        utils::ComboRenderPassDescriptor renderPass({}, depthStencilView);
+        renderPass.cDepthStencilAttachmentInfo.stencilStoreOp = wgpu::StoreOp::Store;
+        renderPass.cDepthStencilAttachmentInfo.depthStoreOp = wgpu::StoreOp::Discard;
+        AssertBeginRenderPassSuccess(&renderPass);
+    }
+}
+
+// Currently only texture views with arrayLayerCount == 1 are allowed to be color and depth
+// stencil attachments
+TEST_F(RenderPassDescriptorValidationTest, TextureViewLayerCountForColorAndDepthStencil) {
+    constexpr uint32_t kLevelCount = 1;
+    constexpr uint32_t kSize = 32;
+    constexpr wgpu::TextureFormat kColorFormat = wgpu::TextureFormat::RGBA8Unorm;
+    constexpr wgpu::TextureFormat kDepthStencilFormat = wgpu::TextureFormat::Depth24PlusStencil8;
+
+    constexpr uint32_t kArrayLayers = 10;
+
+    wgpu::Texture colorTexture = CreateTexture(device, wgpu::TextureDimension::e2D, kColorFormat,
+                                               kSize, kSize, kArrayLayers, kLevelCount);
+    wgpu::Texture depthStencilTexture =
+        CreateTexture(device, wgpu::TextureDimension::e2D, kDepthStencilFormat, kSize, kSize,
+                      kArrayLayers, kLevelCount);
+
+    wgpu::TextureViewDescriptor baseDescriptor;
+    baseDescriptor.dimension = wgpu::TextureViewDimension::e2DArray;
+    baseDescriptor.baseArrayLayer = 0;
+    baseDescriptor.arrayLayerCount = kArrayLayers;
+    baseDescriptor.baseMipLevel = 0;
+    baseDescriptor.mipLevelCount = kLevelCount;
+
+    // Using 2D array texture view with arrayLayerCount > 1 is not allowed for color
+    {
+        wgpu::TextureViewDescriptor descriptor = baseDescriptor;
+        descriptor.format = kColorFormat;
+        descriptor.arrayLayerCount = 5;
+
+        wgpu::TextureView colorTextureView = colorTexture.CreateView(&descriptor);
+        utils::ComboRenderPassDescriptor renderPass({colorTextureView});
+        AssertBeginRenderPassError(&renderPass);
+    }
+
+    // Using 2D array texture view with arrayLayerCount > 1 is not allowed for depth stencil
+    {
+        wgpu::TextureViewDescriptor descriptor = baseDescriptor;
+        descriptor.format = kDepthStencilFormat;
+        descriptor.arrayLayerCount = 5;
+
+        wgpu::TextureView depthStencilView = depthStencilTexture.CreateView(&descriptor);
+        utils::ComboRenderPassDescriptor renderPass({}, depthStencilView);
+        AssertBeginRenderPassError(&renderPass);
+    }
+
+    // Using 2D array texture view that covers the first layer of the texture is OK for color
+    {
+        wgpu::TextureViewDescriptor descriptor = baseDescriptor;
+        descriptor.format = kColorFormat;
+        descriptor.baseArrayLayer = 0;
+        descriptor.arrayLayerCount = 1;
+
+        wgpu::TextureView colorTextureView = colorTexture.CreateView(&descriptor);
+        utils::ComboRenderPassDescriptor renderPass({colorTextureView});
+        AssertBeginRenderPassSuccess(&renderPass);
+    }
+
+    // Using 2D array texture view that covers the first layer is OK for depth stencil
+    {
+        wgpu::TextureViewDescriptor descriptor = baseDescriptor;
+        descriptor.format = kDepthStencilFormat;
+        descriptor.baseArrayLayer = 0;
+        descriptor.arrayLayerCount = 1;
+
+        wgpu::TextureView depthStencilView = depthStencilTexture.CreateView(&descriptor);
+        utils::ComboRenderPassDescriptor renderPass({}, depthStencilView);
+        AssertBeginRenderPassSuccess(&renderPass);
+    }
+
+    // Using 2D array texture view that covers the last layer is OK for color
+    {
+        wgpu::TextureViewDescriptor descriptor = baseDescriptor;
+        descriptor.format = kColorFormat;
+        descriptor.baseArrayLayer = kArrayLayers - 1;
+        descriptor.arrayLayerCount = 1;
+
+        wgpu::TextureView colorTextureView = colorTexture.CreateView(&descriptor);
+        utils::ComboRenderPassDescriptor renderPass({colorTextureView});
+        AssertBeginRenderPassSuccess(&renderPass);
+    }
+
+    // Using 2D array texture view that covers the last layer is OK for depth stencil
+    {
+        wgpu::TextureViewDescriptor descriptor = baseDescriptor;
+        descriptor.format = kDepthStencilFormat;
+        descriptor.baseArrayLayer = kArrayLayers - 1;
+        descriptor.arrayLayerCount = 1;
+
+        wgpu::TextureView depthStencilView = depthStencilTexture.CreateView(&descriptor);
+        utils::ComboRenderPassDescriptor renderPass({}, depthStencilView);
+        AssertBeginRenderPassSuccess(&renderPass);
+    }
+}
+
+// Check that the render pass depth attachment must have the RenderAttachment usage.
+TEST_F(RenderPassDescriptorValidationTest, DepthAttachmentInvalidUsage) {
+    // Control case: using a texture with RenderAttachment is valid.
+    {
+        wgpu::TextureView renderView =
+            Create2DAttachment(device, 1, 1, wgpu::TextureFormat::Depth32Float);
+        utils::ComboRenderPassDescriptor renderPass({}, renderView);
+        renderPass.cDepthStencilAttachmentInfo.stencilLoadOp = wgpu::LoadOp::Undefined;
+        renderPass.cDepthStencilAttachmentInfo.stencilStoreOp = wgpu::StoreOp::Undefined;
+
+        AssertBeginRenderPassSuccess(&renderPass);
+    }
+
+    // Error case: using a texture with Sampled is invalid.
+    {
+        wgpu::TextureDescriptor texDesc;
+        texDesc.usage = wgpu::TextureUsage::TextureBinding;
+        texDesc.size = {1, 1, 1};
+        texDesc.format = wgpu::TextureFormat::Depth32Float;
+        wgpu::Texture sampledTex = device.CreateTexture(&texDesc);
+        wgpu::TextureView sampledView = sampledTex.CreateView();
+
+        utils::ComboRenderPassDescriptor renderPass({}, sampledView);
+        renderPass.cDepthStencilAttachmentInfo.stencilLoadOp = wgpu::LoadOp::Undefined;
+        renderPass.cDepthStencilAttachmentInfo.stencilStoreOp = wgpu::StoreOp::Undefined;
+
+        AssertBeginRenderPassError(&renderPass);
+    }
+}
+
+// Only 2D texture views with mipLevelCount == 1 are allowed to be color or depth-stencil attachments
+TEST_F(RenderPassDescriptorValidationTest, TextureViewLevelCountForColorAndDepthStencil) {
+    constexpr uint32_t kArrayLayers = 1;
+    constexpr uint32_t kSize = 32;
+    constexpr wgpu::TextureFormat kColorFormat = wgpu::TextureFormat::RGBA8Unorm;
+    constexpr wgpu::TextureFormat kDepthStencilFormat = wgpu::TextureFormat::Depth24PlusStencil8;
+
+    constexpr uint32_t kLevelCount = 4;
+
+    wgpu::Texture colorTexture = CreateTexture(device, wgpu::TextureDimension::e2D, kColorFormat,
+                                               kSize, kSize, kArrayLayers, kLevelCount);
+    wgpu::Texture depthStencilTexture =
+        CreateTexture(device, wgpu::TextureDimension::e2D, kDepthStencilFormat, kSize, kSize,
+                      kArrayLayers, kLevelCount);
+
+    wgpu::TextureViewDescriptor baseDescriptor;
+    baseDescriptor.dimension = wgpu::TextureViewDimension::e2D;
+    baseDescriptor.baseArrayLayer = 0;
+    baseDescriptor.arrayLayerCount = kArrayLayers;
+    baseDescriptor.baseMipLevel = 0;
+    baseDescriptor.mipLevelCount = kLevelCount;
+
+    // Using 2D texture view with mipLevelCount > 1 is not allowed for color
+    {
+        wgpu::TextureViewDescriptor descriptor = baseDescriptor;
+        descriptor.format = kColorFormat;
+        descriptor.mipLevelCount = 2;
+
+        wgpu::TextureView colorTextureView = colorTexture.CreateView(&descriptor);
+        utils::ComboRenderPassDescriptor renderPass({colorTextureView});
+        AssertBeginRenderPassError(&renderPass);
+    }
+
+    // Using 2D texture view with mipLevelCount > 1 is not allowed for depth stencil
+    {
+        wgpu::TextureViewDescriptor descriptor = baseDescriptor;
+        descriptor.format = kDepthStencilFormat;
+        descriptor.mipLevelCount = 2;
+
+        wgpu::TextureView depthStencilView = depthStencilTexture.CreateView(&descriptor);
+        utils::ComboRenderPassDescriptor renderPass({}, depthStencilView);
+        AssertBeginRenderPassError(&renderPass);
+    }
+
+    // Using 2D texture view that covers the first level of the texture is OK for color
+    {
+        wgpu::TextureViewDescriptor descriptor = baseDescriptor;
+        descriptor.format = kColorFormat;
+        descriptor.baseMipLevel = 0;
+        descriptor.mipLevelCount = 1;
+
+        wgpu::TextureView colorTextureView = colorTexture.CreateView(&descriptor);
+        utils::ComboRenderPassDescriptor renderPass({colorTextureView});
+        AssertBeginRenderPassSuccess(&renderPass);
+    }
+
+    // Using 2D texture view that covers the first level is OK for depth stencil
+    {
+        wgpu::TextureViewDescriptor descriptor = baseDescriptor;
+        descriptor.format = kDepthStencilFormat;
+        descriptor.baseMipLevel = 0;
+        descriptor.mipLevelCount = 1;
+
+        wgpu::TextureView depthStencilView = depthStencilTexture.CreateView(&descriptor);
+        utils::ComboRenderPassDescriptor renderPass({}, depthStencilView);
+        AssertBeginRenderPassSuccess(&renderPass);
+    }
+
+    // Using 2D texture view that covers the last level is OK for color
+    {
+        wgpu::TextureViewDescriptor descriptor = baseDescriptor;
+        descriptor.format = kColorFormat;
+        descriptor.baseMipLevel = kLevelCount - 1;
+        descriptor.mipLevelCount = 1;
+
+        wgpu::TextureView colorTextureView = colorTexture.CreateView(&descriptor);
+        utils::ComboRenderPassDescriptor renderPass({colorTextureView});
+        AssertBeginRenderPassSuccess(&renderPass);
+    }
+
+    // Using 2D texture view that covers the last level is OK for depth stencil
+    {
+        wgpu::TextureViewDescriptor descriptor = baseDescriptor;
+        descriptor.format = kDepthStencilFormat;
+        descriptor.baseMipLevel = kLevelCount - 1;
+        descriptor.mipLevelCount = 1;
+
+        wgpu::TextureView depthStencilView = depthStencilTexture.CreateView(&descriptor);
+        utils::ComboRenderPassDescriptor renderPass({}, depthStencilView);
+        AssertBeginRenderPassSuccess(&renderPass);
+    }
+}
+
+// It is not allowed to set resolve target when the color attachment is non-multisampled.
+TEST_F(RenderPassDescriptorValidationTest, NonMultisampledColorWithResolveTarget) {
+    static constexpr uint32_t kArrayLayers = 1;
+    static constexpr uint32_t kLevelCount = 1;
+    static constexpr uint32_t kSize = 32;
+    static constexpr uint32_t kSampleCount = 1;
+    static constexpr wgpu::TextureFormat kColorFormat = wgpu::TextureFormat::RGBA8Unorm;
+
+    wgpu::Texture colorTexture =
+        CreateTexture(device, wgpu::TextureDimension::e2D, kColorFormat, kSize, kSize, kArrayLayers,
+                      kLevelCount, kSampleCount);
+    wgpu::Texture resolveTargetTexture =
+        CreateTexture(device, wgpu::TextureDimension::e2D, kColorFormat, kSize, kSize, kArrayLayers,
+                      kLevelCount, kSampleCount);
+    wgpu::TextureView colorTextureView = colorTexture.CreateView();
+    wgpu::TextureView resolveTargetTextureView = resolveTargetTexture.CreateView();
+
+    utils::ComboRenderPassDescriptor renderPass({colorTextureView});
+    renderPass.cColorAttachments[0].resolveTarget = resolveTargetTextureView;
+    AssertBeginRenderPassError(&renderPass);
+}
+
+class MultisampledRenderPassDescriptorValidationTest : public RenderPassDescriptorValidationTest {
+  public:
+    utils::ComboRenderPassDescriptor CreateMultisampledRenderPass() {
+        return utils::ComboRenderPassDescriptor({CreateMultisampledColorTextureView()});
+    }
+
+    wgpu::TextureView CreateMultisampledColorTextureView() {
+        return CreateColorTextureView(kSampleCount);
+    }
+
+    wgpu::TextureView CreateNonMultisampledColorTextureView() { return CreateColorTextureView(1); }
+
+    static constexpr uint32_t kArrayLayers = 1;
+    static constexpr uint32_t kLevelCount = 1;
+    static constexpr uint32_t kSize = 32;
+    static constexpr uint32_t kSampleCount = 4;
+    static constexpr wgpu::TextureFormat kColorFormat = wgpu::TextureFormat::RGBA8Unorm;
+
+  private:
+    wgpu::TextureView CreateColorTextureView(uint32_t sampleCount) {
+        wgpu::Texture colorTexture =
+            CreateTexture(device, wgpu::TextureDimension::e2D, kColorFormat, kSize, kSize,
+                          kArrayLayers, kLevelCount, sampleCount);
+
+        return colorTexture.CreateView();
+    }
+};
+
+// Tests on the use of multisampled textures as color attachments
+TEST_F(MultisampledRenderPassDescriptorValidationTest, MultisampledColorAttachments) {
+    wgpu::TextureView colorTextureView = CreateNonMultisampledColorTextureView();
+    wgpu::TextureView resolveTargetTextureView = CreateNonMultisampledColorTextureView();
+    wgpu::TextureView multisampledColorTextureView = CreateMultisampledColorTextureView();
+
+    // It is allowed to use a multisampled color attachment without setting resolve target.
+    {
+        utils::ComboRenderPassDescriptor renderPass = CreateMultisampledRenderPass();
+        AssertBeginRenderPassSuccess(&renderPass);
+    }
+
+    // It is not allowed to use multiple color attachments with different sample counts.
+    {
+        utils::ComboRenderPassDescriptor renderPass(
+            {multisampledColorTextureView, colorTextureView});
+        AssertBeginRenderPassError(&renderPass);
+    }
+}
+
+// It is not allowed to use a multisampled resolve target.
+TEST_F(MultisampledRenderPassDescriptorValidationTest, MultisampledResolveTarget) {
+    wgpu::TextureView multisampledResolveTargetView = CreateMultisampledColorTextureView();
+
+    utils::ComboRenderPassDescriptor renderPass = CreateMultisampledRenderPass();
+    renderPass.cColorAttachments[0].resolveTarget = multisampledResolveTargetView;
+    AssertBeginRenderPassError(&renderPass);
+}
+
+// It is not allowed to use a resolve target with array layer count > 1.
+TEST_F(MultisampledRenderPassDescriptorValidationTest, ResolveTargetArrayLayerMoreThanOne) {
+    constexpr uint32_t kArrayLayers2 = 2;
+    wgpu::Texture resolveTexture = CreateTexture(device, wgpu::TextureDimension::e2D, kColorFormat,
+                                                 kSize, kSize, kArrayLayers2, kLevelCount);
+    wgpu::TextureViewDescriptor viewDesc;
+    viewDesc.dimension = wgpu::TextureViewDimension::e2DArray;
+    wgpu::TextureView resolveTextureView = resolveTexture.CreateView(&viewDesc);
+
+    utils::ComboRenderPassDescriptor renderPass = CreateMultisampledRenderPass();
+    renderPass.cColorAttachments[0].resolveTarget = resolveTextureView;
+    AssertBeginRenderPassError(&renderPass);
+}
+
+// It is not allowed to use a resolve target with mipmap level count > 1.
+TEST_F(MultisampledRenderPassDescriptorValidationTest, ResolveTargetMipmapLevelMoreThanOne) {
+    constexpr uint32_t kLevelCount2 = 2;
+    wgpu::Texture resolveTexture = CreateTexture(device, wgpu::TextureDimension::e2D, kColorFormat,
+                                                 kSize, kSize, kArrayLayers, kLevelCount2);
+    wgpu::TextureView resolveTextureView = resolveTexture.CreateView();
+
+    utils::ComboRenderPassDescriptor renderPass = CreateMultisampledRenderPass();
+    renderPass.cColorAttachments[0].resolveTarget = resolveTextureView;
+    AssertBeginRenderPassError(&renderPass);
+}
+
+// It is not allowed to use a resolve target which is created from a texture whose usage does
+// not include wgpu::TextureUsage::RenderAttachment.
+TEST_F(MultisampledRenderPassDescriptorValidationTest, ResolveTargetUsageNoRenderAttachment) {
+    constexpr wgpu::TextureUsage kUsage = wgpu::TextureUsage::CopyDst | wgpu::TextureUsage::CopySrc;
+    wgpu::Texture nonColorUsageResolveTexture =
+        CreateTexture(device, wgpu::TextureDimension::e2D, kColorFormat, kSize, kSize, kArrayLayers,
+                      kLevelCount, 1, kUsage);
+    wgpu::TextureView nonColorUsageResolveTextureView = nonColorUsageResolveTexture.CreateView();
+
+    utils::ComboRenderPassDescriptor renderPass = CreateMultisampledRenderPass();
+    renderPass.cColorAttachments[0].resolveTarget = nonColorUsageResolveTextureView;
+    AssertBeginRenderPassError(&renderPass);
+}
+
+// It is not allowed to use a resolve target which is in error state.
+TEST_F(MultisampledRenderPassDescriptorValidationTest, ResolveTargetInErrorState) {
+    wgpu::Texture resolveTexture = CreateTexture(device, wgpu::TextureDimension::e2D, kColorFormat,
+                                                 kSize, kSize, kArrayLayers, kLevelCount);
+    wgpu::TextureViewDescriptor errorTextureView;
+    errorTextureView.dimension = wgpu::TextureViewDimension::e2D;
+    errorTextureView.format = kColorFormat;
+    errorTextureView.baseArrayLayer = kArrayLayers + 1;
+    ASSERT_DEVICE_ERROR(wgpu::TextureView errorResolveTarget =
+                            resolveTexture.CreateView(&errorTextureView));
+
+    utils::ComboRenderPassDescriptor renderPass = CreateMultisampledRenderPass();
+    renderPass.cColorAttachments[0].resolveTarget = errorResolveTarget;
+    AssertBeginRenderPassError(&renderPass);
+}
+
+// It is allowed to use a multisampled color attachment and a non-multisampled resolve target.
+TEST_F(MultisampledRenderPassDescriptorValidationTest, MultisampledColorWithResolveTarget) {
+    wgpu::TextureView resolveTargetTextureView = CreateNonMultisampledColorTextureView();
+
+    utils::ComboRenderPassDescriptor renderPass = CreateMultisampledRenderPass();
+    renderPass.cColorAttachments[0].resolveTarget = resolveTargetTextureView;
+    AssertBeginRenderPassSuccess(&renderPass);
+}
+
+// It is not allowed to use a resolve target in a format different from the color attachment.
+TEST_F(MultisampledRenderPassDescriptorValidationTest, ResolveTargetDifferentFormat) {
+    constexpr wgpu::TextureFormat kColorFormat2 = wgpu::TextureFormat::BGRA8Unorm;
+    wgpu::Texture resolveTexture = CreateTexture(device, wgpu::TextureDimension::e2D, kColorFormat2,
+                                                 kSize, kSize, kArrayLayers, kLevelCount);
+    wgpu::TextureView resolveTextureView = resolveTexture.CreateView();
+
+    utils::ComboRenderPassDescriptor renderPass = CreateMultisampledRenderPass();
+    renderPass.cColorAttachments[0].resolveTarget = resolveTextureView;
+    AssertBeginRenderPassError(&renderPass);
+}
+
+// Tests on the size of the resolve target.
+TEST_F(MultisampledRenderPassDescriptorValidationTest,
+       ColorAttachmentResolveTargetDimensionMismatch) {
+    constexpr uint32_t kSize2 = kSize * 2;
+    wgpu::Texture resolveTexture = CreateTexture(device, wgpu::TextureDimension::e2D, kColorFormat,
+                                                 kSize2, kSize2, kArrayLayers, kLevelCount + 1);
+
+    wgpu::TextureViewDescriptor textureViewDescriptor;
+    textureViewDescriptor.nextInChain = nullptr;
+    textureViewDescriptor.dimension = wgpu::TextureViewDimension::e2D;
+    textureViewDescriptor.format = kColorFormat;
+    textureViewDescriptor.mipLevelCount = 1;
+    textureViewDescriptor.baseArrayLayer = 0;
+    textureViewDescriptor.arrayLayerCount = 1;
+
+    {
+        wgpu::TextureViewDescriptor firstMipLevelDescriptor = textureViewDescriptor;
+        firstMipLevelDescriptor.baseMipLevel = 0;
+
+        wgpu::TextureView resolveTextureView = resolveTexture.CreateView(&firstMipLevelDescriptor);
 
         utils::ComboRenderPassDescriptor renderPass = CreateMultisampledRenderPass();
         renderPass.cColorAttachments[0].resolveTarget = resolveTextureView;
         AssertBeginRenderPassError(&renderPass);
     }
 
-    // Tests on the size of the resolve target.
-    TEST_F(MultisampledRenderPassDescriptorValidationTest,
-           ColorAttachmentResolveTargetDimensionMismatch) {
-        constexpr uint32_t kSize2 = kSize * 2;
-        wgpu::Texture resolveTexture =
-            CreateTexture(device, wgpu::TextureDimension::e2D, kColorFormat, kSize2, kSize2,
-                          kArrayLayers, kLevelCount + 1);
+    {
+        wgpu::TextureViewDescriptor secondMipLevelDescriptor = textureViewDescriptor;
+        secondMipLevelDescriptor.baseMipLevel = 1;
 
-        wgpu::TextureViewDescriptor textureViewDescriptor;
-        textureViewDescriptor.nextInChain = nullptr;
-        textureViewDescriptor.dimension = wgpu::TextureViewDimension::e2D;
-        textureViewDescriptor.format = kColorFormat;
-        textureViewDescriptor.mipLevelCount = 1;
-        textureViewDescriptor.baseArrayLayer = 0;
-        textureViewDescriptor.arrayLayerCount = 1;
+        wgpu::TextureView resolveTextureView = resolveTexture.CreateView(&secondMipLevelDescriptor);
 
-        {
-            wgpu::TextureViewDescriptor firstMipLevelDescriptor = textureViewDescriptor;
-            firstMipLevelDescriptor.baseMipLevel = 0;
+        utils::ComboRenderPassDescriptor renderPass = CreateMultisampledRenderPass();
+        renderPass.cColorAttachments[0].resolveTarget = resolveTextureView;
+        AssertBeginRenderPassSuccess(&renderPass);
+    }
+}
 
-            wgpu::TextureView resolveTextureView =
-                resolveTexture.CreateView(&firstMipLevelDescriptor);
+// Tests the texture format of the resolve target must support being used as resolve target.
+TEST_F(MultisampledRenderPassDescriptorValidationTest, ResolveTargetFormat) {
+    for (wgpu::TextureFormat format : utils::kAllTextureFormats) {
+        if (!utils::TextureFormatSupportsMultisampling(format) ||
+            !utils::TextureFormatSupportsRendering(format)) {
+            continue;
+        }
 
-            utils::ComboRenderPassDescriptor renderPass = CreateMultisampledRenderPass();
-            renderPass.cColorAttachments[0].resolveTarget = resolveTextureView;
+        wgpu::Texture colorTexture =
+            CreateTexture(device, wgpu::TextureDimension::e2D, format, kSize, kSize, kArrayLayers,
+                          kLevelCount, kSampleCount);
+        wgpu::Texture resolveTarget = CreateTexture(device, wgpu::TextureDimension::e2D, format,
+                                                    kSize, kSize, kArrayLayers, kLevelCount, 1);
+
+        utils::ComboRenderPassDescriptor renderPass({colorTexture.CreateView()});
+        renderPass.cColorAttachments[0].resolveTarget = resolveTarget.CreateView();
+        if (utils::TextureFormatSupportsResolveTarget(format)) {
+            AssertBeginRenderPassSuccess(&renderPass);
+        } else {
             AssertBeginRenderPassError(&renderPass);
         }
-
-        {
-            wgpu::TextureViewDescriptor secondMipLevelDescriptor = textureViewDescriptor;
-            secondMipLevelDescriptor.baseMipLevel = 1;
-
-            wgpu::TextureView resolveTextureView =
-                resolveTexture.CreateView(&secondMipLevelDescriptor);
-
-            utils::ComboRenderPassDescriptor renderPass = CreateMultisampledRenderPass();
-            renderPass.cColorAttachments[0].resolveTarget = resolveTextureView;
-            AssertBeginRenderPassSuccess(&renderPass);
-        }
     }
+}
 
-    // Tests the texture format of the resolve target must support being used as resolve target.
-    TEST_F(MultisampledRenderPassDescriptorValidationTest, ResolveTargetFormat) {
-        for (wgpu::TextureFormat format : utils::kAllTextureFormats) {
-            if (!utils::TextureFormatSupportsMultisampling(format) ||
-                !utils::TextureFormatSupportsRendering(format)) {
-                continue;
-            }
+// Tests on the sample count of depth stencil attachment.
+TEST_F(MultisampledRenderPassDescriptorValidationTest, DepthStencilAttachmentSampleCount) {
+    constexpr wgpu::TextureFormat kDepthStencilFormat = wgpu::TextureFormat::Depth24PlusStencil8;
+    wgpu::Texture multisampledDepthStencilTexture =
+        CreateTexture(device, wgpu::TextureDimension::e2D, kDepthStencilFormat, kSize, kSize,
+                      kArrayLayers, kLevelCount, kSampleCount);
+    wgpu::TextureView multisampledDepthStencilTextureView =
+        multisampledDepthStencilTexture.CreateView();
 
-            wgpu::Texture colorTexture =
-                CreateTexture(device, wgpu::TextureDimension::e2D, format, kSize, kSize,
-                              kArrayLayers, kLevelCount, kSampleCount);
-            wgpu::Texture resolveTarget = CreateTexture(device, wgpu::TextureDimension::e2D, format,
-                                                        kSize, kSize, kArrayLayers, kLevelCount, 1);
-
-            utils::ComboRenderPassDescriptor renderPass({colorTexture.CreateView()});
-            renderPass.cColorAttachments[0].resolveTarget = resolveTarget.CreateView();
-            if (utils::TextureFormatSupportsResolveTarget(format)) {
-                AssertBeginRenderPassSuccess(&renderPass);
-            } else {
-                AssertBeginRenderPassError(&renderPass);
-            }
-        }
-    }
-
-    // Tests on the sample count of depth stencil attachment.
-    TEST_F(MultisampledRenderPassDescriptorValidationTest, DepthStencilAttachmentSampleCount) {
-        constexpr wgpu::TextureFormat kDepthStencilFormat =
-            wgpu::TextureFormat::Depth24PlusStencil8;
-        wgpu::Texture multisampledDepthStencilTexture =
+    // It is not allowed to use a depth stencil attachment whose sample count is different from
+    // the one of the color attachment.
+    {
+        wgpu::Texture depthStencilTexture =
             CreateTexture(device, wgpu::TextureDimension::e2D, kDepthStencilFormat, kSize, kSize,
-                          kArrayLayers, kLevelCount, kSampleCount);
-        wgpu::TextureView multisampledDepthStencilTextureView =
-            multisampledDepthStencilTexture.CreateView();
+                          kArrayLayers, kLevelCount);
+        wgpu::TextureView depthStencilTextureView = depthStencilTexture.CreateView();
 
-        // It is not allowed to use a depth stencil attachment whose sample count is different from
-        // the one of the color attachment.
-        {
-            wgpu::Texture depthStencilTexture =
-                CreateTexture(device, wgpu::TextureDimension::e2D, kDepthStencilFormat, kSize,
-                              kSize, kArrayLayers, kLevelCount);
-            wgpu::TextureView depthStencilTextureView = depthStencilTexture.CreateView();
-
-            utils::ComboRenderPassDescriptor renderPass({CreateMultisampledColorTextureView()},
-                                                        depthStencilTextureView);
-            AssertBeginRenderPassError(&renderPass);
-        }
-
-        {
-            utils::ComboRenderPassDescriptor renderPass({CreateNonMultisampledColorTextureView()},
-                                                        multisampledDepthStencilTextureView);
-            AssertBeginRenderPassError(&renderPass);
-        }
-
-        // It is allowed to use a multisampled depth stencil attachment whose sample count is equal
-        // to the one of the color attachment.
-        {
-            utils::ComboRenderPassDescriptor renderPass({CreateMultisampledColorTextureView()},
-                                                        multisampledDepthStencilTextureView);
-            AssertBeginRenderPassSuccess(&renderPass);
-        }
-
-        // It is allowed to use a multisampled depth stencil attachment while there is no color
-        // attachment.
-        {
-            utils::ComboRenderPassDescriptor renderPass({}, multisampledDepthStencilTextureView);
-            AssertBeginRenderPassSuccess(&renderPass);
-        }
+        utils::ComboRenderPassDescriptor renderPass({CreateMultisampledColorTextureView()},
+                                                    depthStencilTextureView);
+        AssertBeginRenderPassError(&renderPass);
     }
 
-    // Tests that NaN cannot be accepted as a valid color or depth clear value and INFINITY is valid
-    // in both color and depth clear values.
-    TEST_F(RenderPassDescriptorValidationTest, UseNaNOrINFINITYAsColorOrDepthClearValue) {
-        wgpu::TextureView color = Create2DAttachment(device, 1, 1, wgpu::TextureFormat::RGBA8Unorm);
-
-        // Tests that NaN cannot be used in clearColor.
-        {
-            utils::ComboRenderPassDescriptor renderPass({color}, nullptr);
-            renderPass.cColorAttachments[0].clearValue.r = NAN;
-            AssertBeginRenderPassError(&renderPass);
-        }
-
-        {
-            utils::ComboRenderPassDescriptor renderPass({color}, nullptr);
-            renderPass.cColorAttachments[0].clearValue.g = NAN;
-            AssertBeginRenderPassError(&renderPass);
-        }
-
-        {
-            utils::ComboRenderPassDescriptor renderPass({color}, nullptr);
-            renderPass.cColorAttachments[0].clearValue.b = NAN;
-            AssertBeginRenderPassError(&renderPass);
-        }
-
-        {
-            utils::ComboRenderPassDescriptor renderPass({color}, nullptr);
-            renderPass.cColorAttachments[0].clearValue.a = NAN;
-            AssertBeginRenderPassError(&renderPass);
-        }
-
-        // Tests that INFINITY can be used in clearColor.
-        {
-            utils::ComboRenderPassDescriptor renderPass({color}, nullptr);
-            renderPass.cColorAttachments[0].clearValue.r = INFINITY;
-            AssertBeginRenderPassSuccess(&renderPass);
-        }
-
-        {
-            utils::ComboRenderPassDescriptor renderPass({color}, nullptr);
-            renderPass.cColorAttachments[0].clearValue.g = INFINITY;
-            AssertBeginRenderPassSuccess(&renderPass);
-        }
-
-        {
-            utils::ComboRenderPassDescriptor renderPass({color}, nullptr);
-            renderPass.cColorAttachments[0].clearValue.b = INFINITY;
-            AssertBeginRenderPassSuccess(&renderPass);
-        }
-
-        {
-            utils::ComboRenderPassDescriptor renderPass({color}, nullptr);
-            renderPass.cColorAttachments[0].clearValue.a = INFINITY;
-            AssertBeginRenderPassSuccess(&renderPass);
-        }
-
-        // Tests that NaN cannot be used in depthClearValue.
-        {
-            wgpu::TextureView depth =
-                Create2DAttachment(device, 1, 1, wgpu::TextureFormat::Depth24Plus);
-            utils::ComboRenderPassDescriptor renderPass({color}, depth);
-            renderPass.cDepthStencilAttachmentInfo.stencilLoadOp = wgpu::LoadOp::Undefined;
-            renderPass.cDepthStencilAttachmentInfo.stencilStoreOp = wgpu::StoreOp::Undefined;
-            renderPass.cDepthStencilAttachmentInfo.depthClearValue = NAN;
-            AssertBeginRenderPassError(&renderPass);
-        }
-
-        // Tests that INFINITY can be used in depthClearValue.
-        {
-            wgpu::TextureView depth =
-                Create2DAttachment(device, 1, 1, wgpu::TextureFormat::Depth24Plus);
-            utils::ComboRenderPassDescriptor renderPass({color}, depth);
-            renderPass.cDepthStencilAttachmentInfo.stencilLoadOp = wgpu::LoadOp::Undefined;
-            renderPass.cDepthStencilAttachmentInfo.stencilStoreOp = wgpu::StoreOp::Undefined;
-            renderPass.cDepthStencilAttachmentInfo.depthClearValue = INFINITY;
-            AssertBeginRenderPassSuccess(&renderPass);
-        }
-
-        // TODO(https://crbug.com/dawn/666): Add a test case for clearStencil for stencilOnly
-        // once stencil8 is supported.
+    {
+        utils::ComboRenderPassDescriptor renderPass({CreateNonMultisampledColorTextureView()},
+                                                    multisampledDepthStencilTextureView);
+        AssertBeginRenderPassError(&renderPass);
     }
 
-    TEST_F(RenderPassDescriptorValidationTest, ValidateDepthStencilReadOnly) {
-        wgpu::TextureView colorView =
-            Create2DAttachment(device, 1, 1, wgpu::TextureFormat::RGBA8Unorm);
-        wgpu::TextureView depthStencilView =
-            Create2DAttachment(device, 1, 1, wgpu::TextureFormat::Depth24PlusStencil8);
-        wgpu::TextureView depthStencilViewNoStencil =
+    // It is allowed to use a multisampled depth stencil attachment whose sample count is equal
+    // to that of the color attachment.
+    {
+        utils::ComboRenderPassDescriptor renderPass({CreateMultisampledColorTextureView()},
+                                                    multisampledDepthStencilTextureView);
+        AssertBeginRenderPassSuccess(&renderPass);
+    }
+
+    // It is allowed to use a multisampled depth stencil attachment while there is no color
+    // attachment.
+    {
+        utils::ComboRenderPassDescriptor renderPass({}, multisampledDepthStencilTextureView);
+        AssertBeginRenderPassSuccess(&renderPass);
+    }
+}
+
+// Tests that NaN cannot be accepted as a valid color or depth clear value and INFINITY is valid
+// in both color and depth clear values.
+TEST_F(RenderPassDescriptorValidationTest, UseNaNOrINFINITYAsColorOrDepthClearValue) {
+    wgpu::TextureView color = Create2DAttachment(device, 1, 1, wgpu::TextureFormat::RGBA8Unorm);
+
+    // Tests that NaN cannot be used in clearColor.
+    {
+        utils::ComboRenderPassDescriptor renderPass({color}, nullptr);
+        renderPass.cColorAttachments[0].clearValue.r = NAN;
+        AssertBeginRenderPassError(&renderPass);
+    }
+
+    {
+        utils::ComboRenderPassDescriptor renderPass({color}, nullptr);
+        renderPass.cColorAttachments[0].clearValue.g = NAN;
+        AssertBeginRenderPassError(&renderPass);
+    }
+
+    {
+        utils::ComboRenderPassDescriptor renderPass({color}, nullptr);
+        renderPass.cColorAttachments[0].clearValue.b = NAN;
+        AssertBeginRenderPassError(&renderPass);
+    }
+
+    {
+        utils::ComboRenderPassDescriptor renderPass({color}, nullptr);
+        renderPass.cColorAttachments[0].clearValue.a = NAN;
+        AssertBeginRenderPassError(&renderPass);
+    }
+
+    // Tests that INFINITY can be used in clearColor.
+    {
+        utils::ComboRenderPassDescriptor renderPass({color}, nullptr);
+        renderPass.cColorAttachments[0].clearValue.r = INFINITY;
+        AssertBeginRenderPassSuccess(&renderPass);
+    }
+
+    {
+        utils::ComboRenderPassDescriptor renderPass({color}, nullptr);
+        renderPass.cColorAttachments[0].clearValue.g = INFINITY;
+        AssertBeginRenderPassSuccess(&renderPass);
+    }
+
+    {
+        utils::ComboRenderPassDescriptor renderPass({color}, nullptr);
+        renderPass.cColorAttachments[0].clearValue.b = INFINITY;
+        AssertBeginRenderPassSuccess(&renderPass);
+    }
+
+    {
+        utils::ComboRenderPassDescriptor renderPass({color}, nullptr);
+        renderPass.cColorAttachments[0].clearValue.a = INFINITY;
+        AssertBeginRenderPassSuccess(&renderPass);
+    }
+
+    // Tests that NaN cannot be used in depthClearValue.
+    {
+        wgpu::TextureView depth =
             Create2DAttachment(device, 1, 1, wgpu::TextureFormat::Depth24Plus);
-
-        // Tests that a read-only pass with depthReadOnly set to true succeeds.
-        {
-            utils::ComboRenderPassDescriptor renderPass({colorView}, depthStencilView);
-            renderPass.cDepthStencilAttachmentInfo.depthLoadOp = wgpu::LoadOp::Undefined;
-            renderPass.cDepthStencilAttachmentInfo.depthStoreOp = wgpu::StoreOp::Undefined;
-            renderPass.cDepthStencilAttachmentInfo.depthReadOnly = true;
-            renderPass.cDepthStencilAttachmentInfo.stencilLoadOp = wgpu::LoadOp::Undefined;
-            renderPass.cDepthStencilAttachmentInfo.stencilStoreOp = wgpu::StoreOp::Undefined;
-            renderPass.cDepthStencilAttachmentInfo.stencilReadOnly = true;
-            AssertBeginRenderPassSuccess(&renderPass);
-        }
-
-        // Tests that a pass with mismatched depthReadOnly and stencilReadOnly values passes when
-        // there is no stencil component in the format (deprecated).
-        {
-            utils::ComboRenderPassDescriptor renderPass({colorView}, depthStencilViewNoStencil);
-            renderPass.cDepthStencilAttachmentInfo.depthLoadOp = wgpu::LoadOp::Undefined;
-            renderPass.cDepthStencilAttachmentInfo.depthStoreOp = wgpu::StoreOp::Undefined;
-            renderPass.cDepthStencilAttachmentInfo.depthReadOnly = true;
-            renderPass.cDepthStencilAttachmentInfo.stencilLoadOp = wgpu::LoadOp::Load;
-            renderPass.cDepthStencilAttachmentInfo.stencilStoreOp = wgpu::StoreOp::Store;
-            renderPass.cDepthStencilAttachmentInfo.stencilReadOnly = false;
-            EXPECT_DEPRECATION_WARNING(AssertBeginRenderPassSuccess(&renderPass));
-        }
-
-        // Tests that a pass with mismatched depthReadOnly and stencilReadOnly values fails when
-        // there there is no stencil component in the format and stencil loadOp/storeOp are passed.
-        {
-            utils::ComboRenderPassDescriptor renderPass({colorView}, depthStencilViewNoStencil);
-            renderPass.cDepthStencilAttachmentInfo.depthLoadOp = wgpu::LoadOp::Undefined;
-            renderPass.cDepthStencilAttachmentInfo.depthStoreOp = wgpu::StoreOp::Undefined;
-            renderPass.cDepthStencilAttachmentInfo.depthReadOnly = true;
-            renderPass.cDepthStencilAttachmentInfo.stencilLoadOp = wgpu::LoadOp::Clear;
-            renderPass.cDepthStencilAttachmentInfo.stencilStoreOp = wgpu::StoreOp::Store;
-            renderPass.cDepthStencilAttachmentInfo.stencilReadOnly = false;
-            AssertBeginRenderPassError(&renderPass);
-
-            renderPass.cDepthStencilAttachmentInfo.stencilLoadOp = wgpu::LoadOp::Undefined;
-            renderPass.cDepthStencilAttachmentInfo.stencilStoreOp = wgpu::StoreOp::Store;
-            AssertBeginRenderPassError(&renderPass);
-
-            renderPass.cDepthStencilAttachmentInfo.stencilLoadOp = wgpu::LoadOp::Clear;
-            renderPass.cDepthStencilAttachmentInfo.stencilStoreOp = wgpu::StoreOp::Undefined;
-            renderPass.cDepthStencilAttachmentInfo.stencilReadOnly = false;
-            AssertBeginRenderPassError(&renderPass);
-        }
-
-        // Tests that a pass with depthReadOnly=true and stencilReadOnly=true can pass
-        // when there is only depth component in the format. We actually enable readonly
-        // depth/stencil attachment in this case.
-        {
-            utils::ComboRenderPassDescriptor renderPass({colorView}, depthStencilViewNoStencil);
-            renderPass.cDepthStencilAttachmentInfo.depthLoadOp = wgpu::LoadOp::Undefined;
-            renderPass.cDepthStencilAttachmentInfo.depthStoreOp = wgpu::StoreOp::Undefined;
-            renderPass.cDepthStencilAttachmentInfo.depthReadOnly = true;
-            renderPass.cDepthStencilAttachmentInfo.stencilLoadOp = wgpu::LoadOp::Undefined;
-            renderPass.cDepthStencilAttachmentInfo.stencilStoreOp = wgpu::StoreOp::Undefined;
-            renderPass.cDepthStencilAttachmentInfo.stencilReadOnly = true;
-            AssertBeginRenderPassSuccess(&renderPass);
-        }
-
-        // Tests that a pass with depthReadOnly=false and stencilReadOnly=true can pass
-        // when there is only depth component in the format. We actually don't enable readonly
-        // depth/stencil attachment in this case.
-        {
-            utils::ComboRenderPassDescriptor renderPass({colorView}, depthStencilViewNoStencil);
-            renderPass.cDepthStencilAttachmentInfo.depthLoadOp = wgpu::LoadOp::Load;
-            renderPass.cDepthStencilAttachmentInfo.depthStoreOp = wgpu::StoreOp::Store;
-            renderPass.cDepthStencilAttachmentInfo.depthReadOnly = false;
-            renderPass.cDepthStencilAttachmentInfo.stencilLoadOp = wgpu::LoadOp::Undefined;
-            renderPass.cDepthStencilAttachmentInfo.stencilStoreOp = wgpu::StoreOp::Undefined;
-            renderPass.cDepthStencilAttachmentInfo.stencilReadOnly = true;
-            AssertBeginRenderPassSuccess(&renderPass);
-        }
-
-        // TODO(https://crbug.com/dawn/666): Add a test case for stencil-only once stencil8 is
-        // supported (depthReadOnly and stencilReadOnly mismatch but no depth component).
-
-        // Tests that a pass with mismatched depthReadOnly and stencilReadOnly values fails when
-        // both depth and stencil components exist.
-        {
-            utils::ComboRenderPassDescriptor renderPass({colorView}, depthStencilView);
-            renderPass.cDepthStencilAttachmentInfo.depthLoadOp = wgpu::LoadOp::Undefined;
-            renderPass.cDepthStencilAttachmentInfo.depthStoreOp = wgpu::StoreOp::Undefined;
-            renderPass.cDepthStencilAttachmentInfo.depthReadOnly = true;
-            renderPass.cDepthStencilAttachmentInfo.stencilLoadOp = wgpu::LoadOp::Load;
-            renderPass.cDepthStencilAttachmentInfo.stencilStoreOp = wgpu::StoreOp::Store;
-            renderPass.cDepthStencilAttachmentInfo.stencilReadOnly = false;
-            AssertBeginRenderPassError(&renderPass);
-        }
-
-        // Tests that a pass with loadOp set to clear and readOnly set to true fails.
-        {
-            utils::ComboRenderPassDescriptor renderPass({colorView}, depthStencilView);
-            renderPass.cDepthStencilAttachmentInfo.depthLoadOp = wgpu::LoadOp::Clear;
-            renderPass.cDepthStencilAttachmentInfo.depthStoreOp = wgpu::StoreOp::Store;
-            renderPass.cDepthStencilAttachmentInfo.depthReadOnly = true;
-            renderPass.cDepthStencilAttachmentInfo.stencilLoadOp = wgpu::LoadOp::Clear;
-            renderPass.cDepthStencilAttachmentInfo.stencilStoreOp = wgpu::StoreOp::Store;
-            renderPass.cDepthStencilAttachmentInfo.stencilReadOnly = true;
-            AssertBeginRenderPassError(&renderPass);
-        }
-
-        // Tests that a pass with storeOp set to discard and readOnly set to true fails.
-        {
-            utils::ComboRenderPassDescriptor renderPass({colorView}, depthStencilView);
-            renderPass.cDepthStencilAttachmentInfo.depthLoadOp = wgpu::LoadOp::Load;
-            renderPass.cDepthStencilAttachmentInfo.depthStoreOp = wgpu::StoreOp::Discard;
-            renderPass.cDepthStencilAttachmentInfo.depthReadOnly = true;
-            renderPass.cDepthStencilAttachmentInfo.stencilLoadOp = wgpu::LoadOp::Load;
-            renderPass.cDepthStencilAttachmentInfo.stencilStoreOp = wgpu::StoreOp::Discard;
-            renderPass.cDepthStencilAttachmentInfo.stencilReadOnly = true;
-            AssertBeginRenderPassError(&renderPass);
-        }
-
-        // Tests that a pass with only depthLoadOp set to load and readOnly set to true fails.
-        {
-            utils::ComboRenderPassDescriptor renderPass({colorView}, depthStencilView);
-            renderPass.cDepthStencilAttachmentInfo.depthLoadOp = wgpu::LoadOp::Load;
-            renderPass.cDepthStencilAttachmentInfo.depthStoreOp = wgpu::StoreOp::Undefined;
-            renderPass.cDepthStencilAttachmentInfo.depthReadOnly = true;
-            renderPass.cDepthStencilAttachmentInfo.stencilLoadOp = wgpu::LoadOp::Undefined;
-            renderPass.cDepthStencilAttachmentInfo.stencilStoreOp = wgpu::StoreOp::Undefined;
-            renderPass.cDepthStencilAttachmentInfo.stencilReadOnly = true;
-            AssertBeginRenderPassError(&renderPass);
-        }
-
-        // Tests that a pass with only depthStoreOp set to store and readOnly set to true fails.
-        {
-            utils::ComboRenderPassDescriptor renderPass({colorView}, depthStencilView);
-            renderPass.cDepthStencilAttachmentInfo.depthLoadOp = wgpu::LoadOp::Undefined;
-            renderPass.cDepthStencilAttachmentInfo.depthStoreOp = wgpu::StoreOp::Store;
-            renderPass.cDepthStencilAttachmentInfo.depthReadOnly = true;
-            renderPass.cDepthStencilAttachmentInfo.stencilLoadOp = wgpu::LoadOp::Undefined;
-            renderPass.cDepthStencilAttachmentInfo.stencilStoreOp = wgpu::StoreOp::Undefined;
-            renderPass.cDepthStencilAttachmentInfo.stencilReadOnly = true;
-            AssertBeginRenderPassError(&renderPass);
-        }
-
-        // Tests that a pass with only stencilLoadOp set to load and readOnly set to true fails.
-        {
-            utils::ComboRenderPassDescriptor renderPass({colorView}, depthStencilView);
-            renderPass.cDepthStencilAttachmentInfo.depthLoadOp = wgpu::LoadOp::Undefined;
-            renderPass.cDepthStencilAttachmentInfo.depthStoreOp = wgpu::StoreOp::Undefined;
-            renderPass.cDepthStencilAttachmentInfo.depthReadOnly = true;
-            renderPass.cDepthStencilAttachmentInfo.stencilLoadOp = wgpu::LoadOp::Load;
-            renderPass.cDepthStencilAttachmentInfo.stencilStoreOp = wgpu::StoreOp::Undefined;
-            renderPass.cDepthStencilAttachmentInfo.stencilReadOnly = true;
-            AssertBeginRenderPassError(&renderPass);
-        }
-
-        // Tests that a pass with only stencilStoreOp set to store and readOnly set to true fails.
-        {
-            utils::ComboRenderPassDescriptor renderPass({colorView}, depthStencilView);
-            renderPass.cDepthStencilAttachmentInfo.depthLoadOp = wgpu::LoadOp::Undefined;
-            renderPass.cDepthStencilAttachmentInfo.depthStoreOp = wgpu::StoreOp::Undefined;
-            renderPass.cDepthStencilAttachmentInfo.depthReadOnly = true;
-            renderPass.cDepthStencilAttachmentInfo.stencilLoadOp = wgpu::LoadOp::Undefined;
-            renderPass.cDepthStencilAttachmentInfo.stencilStoreOp = wgpu::StoreOp::Store;
-            renderPass.cDepthStencilAttachmentInfo.stencilReadOnly = true;
-            AssertBeginRenderPassError(&renderPass);
-        }
+        utils::ComboRenderPassDescriptor renderPass({color}, depth);
+        renderPass.cDepthStencilAttachmentInfo.stencilLoadOp = wgpu::LoadOp::Undefined;
+        renderPass.cDepthStencilAttachmentInfo.stencilStoreOp = wgpu::StoreOp::Undefined;
+        renderPass.cDepthStencilAttachmentInfo.depthClearValue = NAN;
+        AssertBeginRenderPassError(&renderPass);
     }
 
-    // Check that the depth stencil attachment must use all aspects.
-    TEST_F(RenderPassDescriptorValidationTest, ValidateDepthStencilAllAspects) {
-        wgpu::TextureDescriptor texDesc;
-        texDesc.usage = wgpu::TextureUsage::RenderAttachment;
-        texDesc.size = {1, 1, 1};
-
-        wgpu::TextureViewDescriptor viewDesc;
-        viewDesc.baseMipLevel = 0;
-        viewDesc.mipLevelCount = 1;
-        viewDesc.baseArrayLayer = 0;
-        viewDesc.arrayLayerCount = 1;
-
-        // Using all aspects of a depth+stencil texture is allowed.
-        {
-            texDesc.format = wgpu::TextureFormat::Depth24PlusStencil8;
-            viewDesc.aspect = wgpu::TextureAspect::All;
-
-            wgpu::TextureView view = device.CreateTexture(&texDesc).CreateView(&viewDesc);
-            utils::ComboRenderPassDescriptor renderPass({}, view);
-            AssertBeginRenderPassSuccess(&renderPass);
-        }
-
-        // Using only depth of a depth+stencil texture is an error.
-        {
-            texDesc.format = wgpu::TextureFormat::Depth24PlusStencil8;
-            viewDesc.aspect = wgpu::TextureAspect::DepthOnly;
-
-            wgpu::TextureView view = device.CreateTexture(&texDesc).CreateView(&viewDesc);
-            utils::ComboRenderPassDescriptor renderPass({}, view);
-            AssertBeginRenderPassError(&renderPass);
-        }
-
-        // Using only stencil of a depth+stencil texture is an error.
-        {
-            texDesc.format = wgpu::TextureFormat::Depth24PlusStencil8;
-            viewDesc.aspect = wgpu::TextureAspect::StencilOnly;
-
-            wgpu::TextureView view = device.CreateTexture(&texDesc).CreateView(&viewDesc);
-            utils::ComboRenderPassDescriptor renderPass({}, view);
-            AssertBeginRenderPassError(&renderPass);
-        }
-
-        // Using DepthOnly of a depth only texture is allowed.
-        {
-            texDesc.format = wgpu::TextureFormat::Depth24Plus;
-            viewDesc.aspect = wgpu::TextureAspect::DepthOnly;
-
-            wgpu::TextureView view = device.CreateTexture(&texDesc).CreateView(&viewDesc);
-            utils::ComboRenderPassDescriptor renderPass({}, view);
-            renderPass.cDepthStencilAttachmentInfo.stencilLoadOp = wgpu::LoadOp::Undefined;
-            renderPass.cDepthStencilAttachmentInfo.stencilStoreOp = wgpu::StoreOp::Undefined;
-
-            AssertBeginRenderPassSuccess(&renderPass);
-        }
-
-        // TODO(https://crbug.com/dawn/666): Add a test case for stencil-only on stencil8 once this
-        // format is supported.
+    // Tests that INFINITY can be used in depthClearValue.
+    {
+        wgpu::TextureView depth =
+            Create2DAttachment(device, 1, 1, wgpu::TextureFormat::Depth24Plus);
+        utils::ComboRenderPassDescriptor renderPass({color}, depth);
+        renderPass.cDepthStencilAttachmentInfo.stencilLoadOp = wgpu::LoadOp::Undefined;
+        renderPass.cDepthStencilAttachmentInfo.stencilStoreOp = wgpu::StoreOp::Undefined;
+        renderPass.cDepthStencilAttachmentInfo.depthClearValue = INFINITY;
+        AssertBeginRenderPassSuccess(&renderPass);
     }
 
-    // TODO(cwallez@chromium.org): Constraints on attachment aliasing?
+    // TODO(https://crbug.com/dawn/666): Add a test case for clearStencil for stencilOnly
+    // once stencil8 is supported.
+}
+
+TEST_F(RenderPassDescriptorValidationTest, ValidateDepthStencilReadOnly) {
+    wgpu::TextureView colorView = Create2DAttachment(device, 1, 1, wgpu::TextureFormat::RGBA8Unorm);
+    wgpu::TextureView depthStencilView =
+        Create2DAttachment(device, 1, 1, wgpu::TextureFormat::Depth24PlusStencil8);
+    wgpu::TextureView depthStencilViewNoStencil =
+        Create2DAttachment(device, 1, 1, wgpu::TextureFormat::Depth24Plus);
+
+    // Tests that a read-only pass with depthReadOnly set to true succeeds.
+    {
+        utils::ComboRenderPassDescriptor renderPass({colorView}, depthStencilView);
+        renderPass.cDepthStencilAttachmentInfo.depthLoadOp = wgpu::LoadOp::Undefined;
+        renderPass.cDepthStencilAttachmentInfo.depthStoreOp = wgpu::StoreOp::Undefined;
+        renderPass.cDepthStencilAttachmentInfo.depthReadOnly = true;
+        renderPass.cDepthStencilAttachmentInfo.stencilLoadOp = wgpu::LoadOp::Undefined;
+        renderPass.cDepthStencilAttachmentInfo.stencilStoreOp = wgpu::StoreOp::Undefined;
+        renderPass.cDepthStencilAttachmentInfo.stencilReadOnly = true;
+        AssertBeginRenderPassSuccess(&renderPass);
+    }
+
+    // Tests that a pass with mismatched depthReadOnly and stencilReadOnly values passes when
+    // there is no stencil component in the format (deprecated).
+    {
+        utils::ComboRenderPassDescriptor renderPass({colorView}, depthStencilViewNoStencil);
+        renderPass.cDepthStencilAttachmentInfo.depthLoadOp = wgpu::LoadOp::Undefined;
+        renderPass.cDepthStencilAttachmentInfo.depthStoreOp = wgpu::StoreOp::Undefined;
+        renderPass.cDepthStencilAttachmentInfo.depthReadOnly = true;
+        renderPass.cDepthStencilAttachmentInfo.stencilLoadOp = wgpu::LoadOp::Load;
+        renderPass.cDepthStencilAttachmentInfo.stencilStoreOp = wgpu::StoreOp::Store;
+        renderPass.cDepthStencilAttachmentInfo.stencilReadOnly = false;
+        EXPECT_DEPRECATION_WARNING(AssertBeginRenderPassSuccess(&renderPass));
+    }
+
+    // Tests that a pass with mismatched depthReadOnly and stencilReadOnly values fails when
+    // there is no stencil component in the format and stencil loadOp/storeOp are passed.
+    {
+        utils::ComboRenderPassDescriptor renderPass({colorView}, depthStencilViewNoStencil);
+        renderPass.cDepthStencilAttachmentInfo.depthLoadOp = wgpu::LoadOp::Undefined;
+        renderPass.cDepthStencilAttachmentInfo.depthStoreOp = wgpu::StoreOp::Undefined;
+        renderPass.cDepthStencilAttachmentInfo.depthReadOnly = true;
+        renderPass.cDepthStencilAttachmentInfo.stencilLoadOp = wgpu::LoadOp::Clear;
+        renderPass.cDepthStencilAttachmentInfo.stencilStoreOp = wgpu::StoreOp::Store;
+        renderPass.cDepthStencilAttachmentInfo.stencilReadOnly = false;
+        AssertBeginRenderPassError(&renderPass);
+
+        renderPass.cDepthStencilAttachmentInfo.stencilLoadOp = wgpu::LoadOp::Undefined;
+        renderPass.cDepthStencilAttachmentInfo.stencilStoreOp = wgpu::StoreOp::Store;
+        AssertBeginRenderPassError(&renderPass);
+
+        renderPass.cDepthStencilAttachmentInfo.stencilLoadOp = wgpu::LoadOp::Clear;
+        renderPass.cDepthStencilAttachmentInfo.stencilStoreOp = wgpu::StoreOp::Undefined;
+        renderPass.cDepthStencilAttachmentInfo.stencilReadOnly = false;
+        AssertBeginRenderPassError(&renderPass);
+    }
+
+    // Tests that a pass with depthReadOnly=true and stencilReadOnly=true can pass
+    // when there is only depth component in the format. We actually enable readonly
+    // depth/stencil attachment in this case.
+    {
+        utils::ComboRenderPassDescriptor renderPass({colorView}, depthStencilViewNoStencil);
+        renderPass.cDepthStencilAttachmentInfo.depthLoadOp = wgpu::LoadOp::Undefined;
+        renderPass.cDepthStencilAttachmentInfo.depthStoreOp = wgpu::StoreOp::Undefined;
+        renderPass.cDepthStencilAttachmentInfo.depthReadOnly = true;
+        renderPass.cDepthStencilAttachmentInfo.stencilLoadOp = wgpu::LoadOp::Undefined;
+        renderPass.cDepthStencilAttachmentInfo.stencilStoreOp = wgpu::StoreOp::Undefined;
+        renderPass.cDepthStencilAttachmentInfo.stencilReadOnly = true;
+        AssertBeginRenderPassSuccess(&renderPass);
+    }
+
+    // Tests that a pass with depthReadOnly=false and stencilReadOnly=true can pass
+    // when there is only depth component in the format. We actually don't enable readonly
+    // depth/stencil attachment in this case.
+    {
+        utils::ComboRenderPassDescriptor renderPass({colorView}, depthStencilViewNoStencil);
+        renderPass.cDepthStencilAttachmentInfo.depthLoadOp = wgpu::LoadOp::Load;
+        renderPass.cDepthStencilAttachmentInfo.depthStoreOp = wgpu::StoreOp::Store;
+        renderPass.cDepthStencilAttachmentInfo.depthReadOnly = false;
+        renderPass.cDepthStencilAttachmentInfo.stencilLoadOp = wgpu::LoadOp::Undefined;
+        renderPass.cDepthStencilAttachmentInfo.stencilStoreOp = wgpu::StoreOp::Undefined;
+        renderPass.cDepthStencilAttachmentInfo.stencilReadOnly = true;
+        AssertBeginRenderPassSuccess(&renderPass);
+    }
+
+    // TODO(https://crbug.com/dawn/666): Add a test case for stencil-only once stencil8 is
+    // supported (depthReadOnly and stencilReadOnly mismatch but no depth component).
+
+    // Tests that a pass with mismatched depthReadOnly and stencilReadOnly values fails when
+    // both depth and stencil components exist.
+    {
+        utils::ComboRenderPassDescriptor renderPass({colorView}, depthStencilView);
+        renderPass.cDepthStencilAttachmentInfo.depthLoadOp = wgpu::LoadOp::Undefined;
+        renderPass.cDepthStencilAttachmentInfo.depthStoreOp = wgpu::StoreOp::Undefined;
+        renderPass.cDepthStencilAttachmentInfo.depthReadOnly = true;
+        renderPass.cDepthStencilAttachmentInfo.stencilLoadOp = wgpu::LoadOp::Load;
+        renderPass.cDepthStencilAttachmentInfo.stencilStoreOp = wgpu::StoreOp::Store;
+        renderPass.cDepthStencilAttachmentInfo.stencilReadOnly = false;
+        AssertBeginRenderPassError(&renderPass);
+    }
+
+    // Tests that a pass with loadOp set to clear and readOnly set to true fails.
+    {
+        utils::ComboRenderPassDescriptor renderPass({colorView}, depthStencilView);
+        renderPass.cDepthStencilAttachmentInfo.depthLoadOp = wgpu::LoadOp::Clear;
+        renderPass.cDepthStencilAttachmentInfo.depthStoreOp = wgpu::StoreOp::Store;
+        renderPass.cDepthStencilAttachmentInfo.depthReadOnly = true;
+        renderPass.cDepthStencilAttachmentInfo.stencilLoadOp = wgpu::LoadOp::Clear;
+        renderPass.cDepthStencilAttachmentInfo.stencilStoreOp = wgpu::StoreOp::Store;
+        renderPass.cDepthStencilAttachmentInfo.stencilReadOnly = true;
+        AssertBeginRenderPassError(&renderPass);
+    }
+
+    // Tests that a pass with storeOp set to discard and readOnly set to true fails.
+    {
+        utils::ComboRenderPassDescriptor renderPass({colorView}, depthStencilView);
+        renderPass.cDepthStencilAttachmentInfo.depthLoadOp = wgpu::LoadOp::Load;
+        renderPass.cDepthStencilAttachmentInfo.depthStoreOp = wgpu::StoreOp::Discard;
+        renderPass.cDepthStencilAttachmentInfo.depthReadOnly = true;
+        renderPass.cDepthStencilAttachmentInfo.stencilLoadOp = wgpu::LoadOp::Load;
+        renderPass.cDepthStencilAttachmentInfo.stencilStoreOp = wgpu::StoreOp::Discard;
+        renderPass.cDepthStencilAttachmentInfo.stencilReadOnly = true;
+        AssertBeginRenderPassError(&renderPass);
+    }
+
+    // Tests that a pass with only depthLoadOp set to load and readOnly set to true fails.
+    {
+        utils::ComboRenderPassDescriptor renderPass({colorView}, depthStencilView);
+        renderPass.cDepthStencilAttachmentInfo.depthLoadOp = wgpu::LoadOp::Load;
+        renderPass.cDepthStencilAttachmentInfo.depthStoreOp = wgpu::StoreOp::Undefined;
+        renderPass.cDepthStencilAttachmentInfo.depthReadOnly = true;
+        renderPass.cDepthStencilAttachmentInfo.stencilLoadOp = wgpu::LoadOp::Undefined;
+        renderPass.cDepthStencilAttachmentInfo.stencilStoreOp = wgpu::StoreOp::Undefined;
+        renderPass.cDepthStencilAttachmentInfo.stencilReadOnly = true;
+        AssertBeginRenderPassError(&renderPass);
+    }
+
+    // Tests that a pass with only depthStoreOp set to store and readOnly set to true fails.
+    {
+        utils::ComboRenderPassDescriptor renderPass({colorView}, depthStencilView);
+        renderPass.cDepthStencilAttachmentInfo.depthLoadOp = wgpu::LoadOp::Undefined;
+        renderPass.cDepthStencilAttachmentInfo.depthStoreOp = wgpu::StoreOp::Store;
+        renderPass.cDepthStencilAttachmentInfo.depthReadOnly = true;
+        renderPass.cDepthStencilAttachmentInfo.stencilLoadOp = wgpu::LoadOp::Undefined;
+        renderPass.cDepthStencilAttachmentInfo.stencilStoreOp = wgpu::StoreOp::Undefined;
+        renderPass.cDepthStencilAttachmentInfo.stencilReadOnly = true;
+        AssertBeginRenderPassError(&renderPass);
+    }
+
+    // Tests that a pass with only stencilLoadOp set to load and readOnly set to true fails.
+    {
+        utils::ComboRenderPassDescriptor renderPass({colorView}, depthStencilView);
+        renderPass.cDepthStencilAttachmentInfo.depthLoadOp = wgpu::LoadOp::Undefined;
+        renderPass.cDepthStencilAttachmentInfo.depthStoreOp = wgpu::StoreOp::Undefined;
+        renderPass.cDepthStencilAttachmentInfo.depthReadOnly = true;
+        renderPass.cDepthStencilAttachmentInfo.stencilLoadOp = wgpu::LoadOp::Load;
+        renderPass.cDepthStencilAttachmentInfo.stencilStoreOp = wgpu::StoreOp::Undefined;
+        renderPass.cDepthStencilAttachmentInfo.stencilReadOnly = true;
+        AssertBeginRenderPassError(&renderPass);
+    }
+
+    // Tests that a pass with only stencilStoreOp set to store and readOnly set to true fails.
+    {
+        utils::ComboRenderPassDescriptor renderPass({colorView}, depthStencilView);
+        renderPass.cDepthStencilAttachmentInfo.depthLoadOp = wgpu::LoadOp::Undefined;
+        renderPass.cDepthStencilAttachmentInfo.depthStoreOp = wgpu::StoreOp::Undefined;
+        renderPass.cDepthStencilAttachmentInfo.depthReadOnly = true;
+        renderPass.cDepthStencilAttachmentInfo.stencilLoadOp = wgpu::LoadOp::Undefined;
+        renderPass.cDepthStencilAttachmentInfo.stencilStoreOp = wgpu::StoreOp::Store;
+        renderPass.cDepthStencilAttachmentInfo.stencilReadOnly = true;
+        AssertBeginRenderPassError(&renderPass);
+    }
+}
+
+// Check that the depth stencil attachment must use all aspects.
+TEST_F(RenderPassDescriptorValidationTest, ValidateDepthStencilAllAspects) {
+    wgpu::TextureDescriptor texDesc;
+    texDesc.usage = wgpu::TextureUsage::RenderAttachment;
+    texDesc.size = {1, 1, 1};
+
+    wgpu::TextureViewDescriptor viewDesc;
+    viewDesc.baseMipLevel = 0;
+    viewDesc.mipLevelCount = 1;
+    viewDesc.baseArrayLayer = 0;
+    viewDesc.arrayLayerCount = 1;
+
+    // Using all aspects of a depth+stencil texture is allowed.
+    {
+        texDesc.format = wgpu::TextureFormat::Depth24PlusStencil8;
+        viewDesc.aspect = wgpu::TextureAspect::All;
+
+        wgpu::TextureView view = device.CreateTexture(&texDesc).CreateView(&viewDesc);
+        utils::ComboRenderPassDescriptor renderPass({}, view);
+        AssertBeginRenderPassSuccess(&renderPass);
+    }
+
+    // Using only depth of a depth+stencil texture is an error.
+    {
+        texDesc.format = wgpu::TextureFormat::Depth24PlusStencil8;
+        viewDesc.aspect = wgpu::TextureAspect::DepthOnly;
+
+        wgpu::TextureView view = device.CreateTexture(&texDesc).CreateView(&viewDesc);
+        utils::ComboRenderPassDescriptor renderPass({}, view);
+        AssertBeginRenderPassError(&renderPass);
+    }
+
+    // Using only stencil of a depth+stencil texture is an error.
+    {
+        texDesc.format = wgpu::TextureFormat::Depth24PlusStencil8;
+        viewDesc.aspect = wgpu::TextureAspect::StencilOnly;
+
+        wgpu::TextureView view = device.CreateTexture(&texDesc).CreateView(&viewDesc);
+        utils::ComboRenderPassDescriptor renderPass({}, view);
+        AssertBeginRenderPassError(&renderPass);
+    }
+
+    // Using DepthOnly of a depth only texture is allowed.
+    {
+        texDesc.format = wgpu::TextureFormat::Depth24Plus;
+        viewDesc.aspect = wgpu::TextureAspect::DepthOnly;
+
+        wgpu::TextureView view = device.CreateTexture(&texDesc).CreateView(&viewDesc);
+        utils::ComboRenderPassDescriptor renderPass({}, view);
+        renderPass.cDepthStencilAttachmentInfo.stencilLoadOp = wgpu::LoadOp::Undefined;
+        renderPass.cDepthStencilAttachmentInfo.stencilStoreOp = wgpu::StoreOp::Undefined;
+
+        AssertBeginRenderPassSuccess(&renderPass);
+    }
+
+    // TODO(https://crbug.com/dawn/666): Add a test case for stencil-only on stencil8 once this
+    // format is supported.
+}
+
+// TODO(cwallez@chromium.org): Constraints on attachment aliasing?
 
 }  // anonymous namespace
diff --git a/src/dawn/tests/unittests/validation/RenderPipelineValidationTests.cpp b/src/dawn/tests/unittests/validation/RenderPipelineValidationTests.cpp
index 8a936c9..079f9b3 100644
--- a/src/dawn/tests/unittests/validation/RenderPipelineValidationTests.cpp
+++ b/src/dawn/tests/unittests/validation/RenderPipelineValidationTests.cpp
@@ -17,8 +17,8 @@
 #include <string>
 #include <vector>
 
-#include "dawn/tests/unittests/validation/ValidationTest.h"
 #include "dawn/common/Constants.h"
+#include "dawn/tests/unittests/validation/ValidationTest.h"
 #include "dawn/utils/ComboRenderPipelineDescriptor.h"
 #include "dawn/utils/WGPUHelpers.h"
 
@@ -49,11 +49,11 @@
 };
 
 namespace {
-    bool BlendFactorContainsSrcAlpha(const wgpu::BlendFactor& blendFactor) {
-        return blendFactor == wgpu::BlendFactor::SrcAlpha ||
-               blendFactor == wgpu::BlendFactor::OneMinusSrcAlpha ||
-               blendFactor == wgpu::BlendFactor::SrcAlphaSaturated;
-    }
+bool BlendFactorContainsSrcAlpha(const wgpu::BlendFactor& blendFactor) {
+    return blendFactor == wgpu::BlendFactor::SrcAlpha ||
+           blendFactor == wgpu::BlendFactor::OneMinusSrcAlpha ||
+           blendFactor == wgpu::BlendFactor::SrcAlphaSaturated;
+}
 }  // namespace
 
 // Test cases where creation should succeed
diff --git a/src/dawn/tests/unittests/validation/ResourceUsageTrackingTests.cpp b/src/dawn/tests/unittests/validation/ResourceUsageTrackingTests.cpp
index c2902c4..8e842b3 100644
--- a/src/dawn/tests/unittests/validation/ResourceUsageTrackingTests.cpp
+++ b/src/dawn/tests/unittests/validation/ResourceUsageTrackingTests.cpp
@@ -21,1510 +21,927 @@
 
 namespace {
 
-    class ResourceUsageTrackingTest : public ValidationTest {
-      protected:
-        wgpu::Buffer CreateBuffer(uint64_t size, wgpu::BufferUsage usage) {
-            wgpu::BufferDescriptor descriptor;
-            descriptor.size = size;
-            descriptor.usage = usage;
+class ResourceUsageTrackingTest : public ValidationTest {
+  protected:
+    wgpu::Buffer CreateBuffer(uint64_t size, wgpu::BufferUsage usage) {
+        wgpu::BufferDescriptor descriptor;
+        descriptor.size = size;
+        descriptor.usage = usage;
 
-            return device.CreateBuffer(&descriptor);
-        }
+        return device.CreateBuffer(&descriptor);
+    }
 
-        wgpu::Texture CreateTexture(wgpu::TextureUsage usage,
-                                    wgpu::TextureFormat format = wgpu::TextureFormat::RGBA8Unorm) {
-            wgpu::TextureDescriptor descriptor;
-            descriptor.dimension = wgpu::TextureDimension::e2D;
-            descriptor.size = {1, 1, 1};
-            descriptor.sampleCount = 1;
-            descriptor.mipLevelCount = 1;
-            descriptor.usage = usage;
-            descriptor.format = format;
+    wgpu::Texture CreateTexture(wgpu::TextureUsage usage,
+                                wgpu::TextureFormat format = wgpu::TextureFormat::RGBA8Unorm) {
+        wgpu::TextureDescriptor descriptor;
+        descriptor.dimension = wgpu::TextureDimension::e2D;
+        descriptor.size = {1, 1, 1};
+        descriptor.sampleCount = 1;
+        descriptor.mipLevelCount = 1;
+        descriptor.usage = usage;
+        descriptor.format = format;
 
-            return device.CreateTexture(&descriptor);
-        }
+        return device.CreateTexture(&descriptor);
+    }
 
-        // Note that it is valid to bind any bind groups for indices that the pipeline doesn't use.
-        // We create a no-op render or compute pipeline without any bindings, and set bind groups
-        // in the caller, so it is always correct for binding validation between bind groups and
-        // pipeline. But those bind groups in caller can be used for validation for other purposes.
-        wgpu::RenderPipeline CreateNoOpRenderPipeline() {
-            wgpu::ShaderModule vsModule = utils::CreateShaderModule(device, R"(
+    // Note that it is valid to bind any bind groups for indices that the pipeline doesn't use.
+    // We create a no-op render or compute pipeline without any bindings, and set bind groups
+    // in the caller, so it is always correct for binding validation between bind groups and
+    // pipeline. But those bind groups in caller can be used for validation for other purposes.
+    wgpu::RenderPipeline CreateNoOpRenderPipeline() {
+        wgpu::ShaderModule vsModule = utils::CreateShaderModule(device, R"(
                 @stage(vertex) fn main() -> @builtin(position) vec4<f32> {
                     return vec4<f32>();
                 })");
 
-            wgpu::ShaderModule fsModule = utils::CreateShaderModule(device, R"(
+        wgpu::ShaderModule fsModule = utils::CreateShaderModule(device, R"(
                 @stage(fragment) fn main() {
                 })");
-            utils::ComboRenderPipelineDescriptor pipelineDescriptor;
-            pipelineDescriptor.vertex.module = vsModule;
-            pipelineDescriptor.cFragment.module = fsModule;
-            pipelineDescriptor.cTargets[0].writeMask = wgpu::ColorWriteMask::None;
-            pipelineDescriptor.layout = utils::MakeBasicPipelineLayout(device, nullptr);
-            return device.CreateRenderPipeline(&pipelineDescriptor);
-        }
+        utils::ComboRenderPipelineDescriptor pipelineDescriptor;
+        pipelineDescriptor.vertex.module = vsModule;
+        pipelineDescriptor.cFragment.module = fsModule;
+        pipelineDescriptor.cTargets[0].writeMask = wgpu::ColorWriteMask::None;
+        pipelineDescriptor.layout = utils::MakeBasicPipelineLayout(device, nullptr);
+        return device.CreateRenderPipeline(&pipelineDescriptor);
+    }
 
-        wgpu::ComputePipeline CreateNoOpComputePipeline(std::vector<wgpu::BindGroupLayout> bgls) {
-            wgpu::ShaderModule csModule = utils::CreateShaderModule(device, R"(
+    wgpu::ComputePipeline CreateNoOpComputePipeline(std::vector<wgpu::BindGroupLayout> bgls) {
+        wgpu::ShaderModule csModule = utils::CreateShaderModule(device, R"(
                 @stage(compute) @workgroup_size(1) fn main() {
                 })");
-            wgpu::ComputePipelineDescriptor pipelineDescriptor;
-            pipelineDescriptor.layout = utils::MakePipelineLayout(device, std::move(bgls));
-            pipelineDescriptor.compute.module = csModule;
-            pipelineDescriptor.compute.entryPoint = "main";
-            return device.CreateComputePipeline(&pipelineDescriptor);
-        }
-
-        static constexpr wgpu::TextureFormat kFormat = wgpu::TextureFormat::RGBA8Unorm;
-    };
-
-    // Test that using a single buffer in multiple read usages in the same pass is allowed.
-    TEST_F(ResourceUsageTrackingTest, BufferWithMultipleReadUsage) {
-        // Test render pass
-        {
-            // Create a buffer, and use the buffer as both vertex and index buffer.
-            wgpu::Buffer buffer =
-                CreateBuffer(4, wgpu::BufferUsage::Vertex | wgpu::BufferUsage::Index);
-
-            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-            PlaceholderRenderPass PlaceholderRenderPass(device);
-            wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&PlaceholderRenderPass);
-            pass.SetIndexBuffer(buffer, wgpu::IndexFormat::Uint32);
-            pass.SetVertexBuffer(0, buffer);
-            pass.End();
-            encoder.Finish();
-        }
-
-        // Test compute pass
-        {
-            // Create buffer and bind group
-            wgpu::Buffer buffer =
-                CreateBuffer(4, wgpu::BufferUsage::Uniform | wgpu::BufferUsage::Storage);
-
-            wgpu::BindGroupLayout bgl = utils::MakeBindGroupLayout(
-                device,
-                {{0, wgpu::ShaderStage::Compute, wgpu::BufferBindingType::Uniform},
-                 {1, wgpu::ShaderStage::Compute, wgpu::BufferBindingType::ReadOnlyStorage}});
-            wgpu::BindGroup bg = utils::MakeBindGroup(device, bgl, {{0, buffer}, {1, buffer}});
-
-            // Use the buffer as both uniform and readonly storage buffer in compute pass.
-            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-            wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
-            pass.SetBindGroup(0, bg);
-            pass.End();
-            encoder.Finish();
-        }
+        wgpu::ComputePipelineDescriptor pipelineDescriptor;
+        pipelineDescriptor.layout = utils::MakePipelineLayout(device, std::move(bgls));
+        pipelineDescriptor.compute.module = csModule;
+        pipelineDescriptor.compute.entryPoint = "main";
+        return device.CreateComputePipeline(&pipelineDescriptor);
     }
 
-    // Test that it is invalid to use the same buffer as both readable and writable in the same
-    // render pass. It is invalid in the same dispatch in compute pass.
-    TEST_F(ResourceUsageTrackingTest, BufferWithReadAndWriteUsage) {
-        // test render pass
-        {
-            // Create buffer and bind group
-            wgpu::Buffer buffer =
-                CreateBuffer(4, wgpu::BufferUsage::Storage | wgpu::BufferUsage::Index);
+    static constexpr wgpu::TextureFormat kFormat = wgpu::TextureFormat::RGBA8Unorm;
+};
 
-            wgpu::BindGroupLayout bgl = utils::MakeBindGroupLayout(
-                device, {{0, wgpu::ShaderStage::Fragment, wgpu::BufferBindingType::Storage}});
-            wgpu::BindGroup bg = utils::MakeBindGroup(device, bgl, {{0, buffer}});
+// Test that using a single buffer in multiple read usages in the same pass is allowed.
+TEST_F(ResourceUsageTrackingTest, BufferWithMultipleReadUsage) {
+    // Test render pass
+    {
+        // Create a buffer, and use the buffer as both vertex and index buffer.
+        wgpu::Buffer buffer = CreateBuffer(4, wgpu::BufferUsage::Vertex | wgpu::BufferUsage::Index);
 
-            // It is invalid to use the buffer as both index and storage in render pass
-            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-            PlaceholderRenderPass PlaceholderRenderPass(device);
-            wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&PlaceholderRenderPass);
-            pass.SetIndexBuffer(buffer, wgpu::IndexFormat::Uint32);
-            pass.SetBindGroup(0, bg);
-            pass.End();
-            ASSERT_DEVICE_ERROR(encoder.Finish());
-        }
-
-        // test compute pass
-        {
-            // Create buffer and bind group
-            wgpu::Buffer buffer = CreateBuffer(512, wgpu::BufferUsage::Storage);
-
-            wgpu::BindGroupLayout bgl = utils::MakeBindGroupLayout(
-                device,
-                {{0, wgpu::ShaderStage::Compute, wgpu::BufferBindingType::Storage},
-                 {1, wgpu::ShaderStage::Compute, wgpu::BufferBindingType::ReadOnlyStorage}});
-            wgpu::BindGroup bg =
-                utils::MakeBindGroup(device, bgl, {{0, buffer, 0, 4}, {1, buffer, 256, 4}});
-
-            // Create a no-op compute pipeline
-            wgpu::ComputePipeline cp = CreateNoOpComputePipeline({bgl});
-
-            // It is valid to use the buffer as both storage and readonly storage in a single
-            // compute pass if dispatch command is not called.
-            {
-                wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-                wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
-                pass.SetBindGroup(0, bg);
-                pass.End();
-                encoder.Finish();
-            }
-
-            // It is invalid to use the buffer as both storage and readonly storage in a single
-            // dispatch.
-            {
-                wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-                wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
-                pass.SetPipeline(cp);
-                pass.SetBindGroup(0, bg);
-                pass.DispatchWorkgroups(1);
-                pass.End();
-                ASSERT_DEVICE_ERROR(encoder.Finish());
-            }
-        }
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        PlaceholderRenderPass PlaceholderRenderPass(device);
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&PlaceholderRenderPass);
+        pass.SetIndexBuffer(buffer, wgpu::IndexFormat::Uint32);
+        pass.SetVertexBuffer(0, buffer);
+        pass.End();
+        encoder.Finish();
     }
 
-    // Test the use of a buffer as a storage buffer multiple times in the same synchronization
-    // scope.
-    TEST_F(ResourceUsageTrackingTest, BufferUsedAsStorageMultipleTimes) {
+    // Test compute pass
+    {
+        // Create buffer and bind group
+        wgpu::Buffer buffer =
+            CreateBuffer(4, wgpu::BufferUsage::Uniform | wgpu::BufferUsage::Storage);
+
+        wgpu::BindGroupLayout bgl = utils::MakeBindGroupLayout(
+            device, {{0, wgpu::ShaderStage::Compute, wgpu::BufferBindingType::Uniform},
+                     {1, wgpu::ShaderStage::Compute, wgpu::BufferBindingType::ReadOnlyStorage}});
+        wgpu::BindGroup bg = utils::MakeBindGroup(device, bgl, {{0, buffer}, {1, buffer}});
+
+        // Use the buffer as both uniform and readonly storage buffer in compute pass.
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
+        pass.SetBindGroup(0, bg);
+        pass.End();
+        encoder.Finish();
+    }
+}
+
+// Test that it is invalid to use the same buffer as both readable and writable in the same
+// render pass. It is invalid in the same dispatch in compute pass.
+TEST_F(ResourceUsageTrackingTest, BufferWithReadAndWriteUsage) {
+    // test render pass
+    {
+        // Create buffer and bind group
+        wgpu::Buffer buffer =
+            CreateBuffer(4, wgpu::BufferUsage::Storage | wgpu::BufferUsage::Index);
+
+        wgpu::BindGroupLayout bgl = utils::MakeBindGroupLayout(
+            device, {{0, wgpu::ShaderStage::Fragment, wgpu::BufferBindingType::Storage}});
+        wgpu::BindGroup bg = utils::MakeBindGroup(device, bgl, {{0, buffer}});
+
+        // It is invalid to use the buffer as both index and storage in render pass
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        PlaceholderRenderPass PlaceholderRenderPass(device);
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&PlaceholderRenderPass);
+        pass.SetIndexBuffer(buffer, wgpu::IndexFormat::Uint32);
+        pass.SetBindGroup(0, bg);
+        pass.End();
+        ASSERT_DEVICE_ERROR(encoder.Finish());
+    }
+
+    // test compute pass
+    {
         // Create buffer and bind group
         wgpu::Buffer buffer = CreateBuffer(512, wgpu::BufferUsage::Storage);
 
         wgpu::BindGroupLayout bgl = utils::MakeBindGroupLayout(
-            device, {{0, wgpu::ShaderStage::Fragment | wgpu::ShaderStage::Compute,
-                      wgpu::BufferBindingType::Storage},
-                     {1, wgpu::ShaderStage::Fragment | wgpu::ShaderStage::Compute,
-                      wgpu::BufferBindingType::Storage}});
+            device, {{0, wgpu::ShaderStage::Compute, wgpu::BufferBindingType::Storage},
+                     {1, wgpu::ShaderStage::Compute, wgpu::BufferBindingType::ReadOnlyStorage}});
         wgpu::BindGroup bg =
             utils::MakeBindGroup(device, bgl, {{0, buffer, 0, 4}, {1, buffer, 256, 4}});
 
-        // test render pass
+        // Create a no-op compute pipeline
+        wgpu::ComputePipeline cp = CreateNoOpComputePipeline({bgl});
+
+        // It is valid to use the buffer as both storage and readonly storage in a single
+        // compute pass if dispatch command is not called.
         {
-            // It is valid to use multiple storage usages on the same buffer in render pass
             wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-            PlaceholderRenderPass PlaceholderRenderPass(device);
-            wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&PlaceholderRenderPass);
+            wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
             pass.SetBindGroup(0, bg);
             pass.End();
             encoder.Finish();
         }
 
-        // test compute pass
+        // It is invalid to use the buffer as both storage and readonly storage in a single
+        // dispatch.
         {
-            // It is valid to use multiple storage usages on the same buffer in a dispatch
-            wgpu::ComputePipeline cp = CreateNoOpComputePipeline({bgl});
-
             wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
             wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
             pass.SetPipeline(cp);
             pass.SetBindGroup(0, bg);
             pass.DispatchWorkgroups(1);
             pass.End();
-            encoder.Finish();
-        }
-    }
-
-    // Test that using the same buffer as both readable and writable in different passes is allowed
-    TEST_F(ResourceUsageTrackingTest, BufferWithReadAndWriteUsageInDifferentPasses) {
-        // Test render pass
-        {
-            // Create buffers that will be used as index and storage buffers
-            wgpu::Buffer buffer0 =
-                CreateBuffer(4, wgpu::BufferUsage::Storage | wgpu::BufferUsage::Index);
-            wgpu::Buffer buffer1 =
-                CreateBuffer(4, wgpu::BufferUsage::Storage | wgpu::BufferUsage::Index);
-
-            // Create bind groups to use the buffer as storage
-            wgpu::BindGroupLayout bgl = utils::MakeBindGroupLayout(
-                device, {{0, wgpu::ShaderStage::Fragment, wgpu::BufferBindingType::Storage}});
-            wgpu::BindGroup bg0 = utils::MakeBindGroup(device, bgl, {{0, buffer0}});
-            wgpu::BindGroup bg1 = utils::MakeBindGroup(device, bgl, {{0, buffer1}});
-
-            // Use these two buffers as both index and storage in different render passes
-            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-            PlaceholderRenderPass PlaceholderRenderPass(device);
-
-            wgpu::RenderPassEncoder pass0 = encoder.BeginRenderPass(&PlaceholderRenderPass);
-            pass0.SetIndexBuffer(buffer0, wgpu::IndexFormat::Uint32);
-            pass0.SetBindGroup(0, bg1);
-            pass0.End();
-
-            wgpu::RenderPassEncoder pass1 = encoder.BeginRenderPass(&PlaceholderRenderPass);
-            pass1.SetIndexBuffer(buffer1, wgpu::IndexFormat::Uint32);
-            pass1.SetBindGroup(0, bg0);
-            pass1.End();
-
-            encoder.Finish();
-        }
-
-        // Test compute pass
-        {
-            // Create buffer and bind groups that will be used as storage and uniform bindings
-            wgpu::Buffer buffer =
-                CreateBuffer(4, wgpu::BufferUsage::Storage | wgpu::BufferUsage::Uniform);
-
-            wgpu::BindGroupLayout bgl0 = utils::MakeBindGroupLayout(
-                device, {{0, wgpu::ShaderStage::Compute, wgpu::BufferBindingType::Storage}});
-            wgpu::BindGroupLayout bgl1 = utils::MakeBindGroupLayout(
-                device, {{0, wgpu::ShaderStage::Compute, wgpu::BufferBindingType::Uniform}});
-            wgpu::BindGroup bg0 = utils::MakeBindGroup(device, bgl0, {{0, buffer}});
-            wgpu::BindGroup bg1 = utils::MakeBindGroup(device, bgl1, {{0, buffer}});
-
-            // Use the buffer as both storage and uniform in different compute passes
-            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-
-            wgpu::ComputePassEncoder pass0 = encoder.BeginComputePass();
-            pass0.SetBindGroup(0, bg0);
-            pass0.End();
-
-            wgpu::ComputePassEncoder pass1 = encoder.BeginComputePass();
-            pass1.SetBindGroup(1, bg1);
-            pass1.End();
-
-            encoder.Finish();
-        }
-
-        // Test render pass and compute pass mixed together with resource dependency.
-        {
-            // Create buffer and bind groups that will be used as storage and uniform bindings
-            wgpu::Buffer buffer = CreateBuffer(4, wgpu::BufferUsage::Storage);
-
-            wgpu::BindGroupLayout bgl0 = utils::MakeBindGroupLayout(
-                device, {{0, wgpu::ShaderStage::Compute, wgpu::BufferBindingType::Storage}});
-            wgpu::BindGroupLayout bgl1 = utils::MakeBindGroupLayout(
-                device,
-                {{0, wgpu::ShaderStage::Fragment, wgpu::BufferBindingType::ReadOnlyStorage}});
-            wgpu::BindGroup bg0 = utils::MakeBindGroup(device, bgl0, {{0, buffer}});
-            wgpu::BindGroup bg1 = utils::MakeBindGroup(device, bgl1, {{0, buffer}});
-
-            // Use the buffer as storage and uniform in render pass and compute pass respectively
-            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-
-            wgpu::ComputePassEncoder pass0 = encoder.BeginComputePass();
-            pass0.SetBindGroup(0, bg0);
-            pass0.End();
-
-            PlaceholderRenderPass PlaceholderRenderPass(device);
-            wgpu::RenderPassEncoder pass1 = encoder.BeginRenderPass(&PlaceholderRenderPass);
-            pass1.SetBindGroup(1, bg1);
-            pass1.End();
-
-            encoder.Finish();
-        }
-    }
-
-    // Test that it is invalid to use the same buffer as both readable and writable in different
-    // draws in a single render pass. But it is valid in different dispatches in a single compute
-    // pass.
-    TEST_F(ResourceUsageTrackingTest, BufferWithReadAndWriteUsageInDifferentDrawsOrDispatches) {
-        // Test render pass
-        {
-            // Create a buffer and a bind group
-            wgpu::Buffer buffer =
-                CreateBuffer(4, wgpu::BufferUsage::Storage | wgpu::BufferUsage::Index);
-            wgpu::BindGroupLayout bgl = utils::MakeBindGroupLayout(
-                device, {{0, wgpu::ShaderStage::Fragment, wgpu::BufferBindingType::Storage}});
-            wgpu::BindGroup bg = utils::MakeBindGroup(device, bgl, {{0, buffer}});
-
-            // Create a no-op render pipeline.
-            wgpu::RenderPipeline rp = CreateNoOpRenderPipeline();
-
-            // It is not allowed to use the same buffer as both readable and writable in different
-            // draws within the same render pass.
-            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-            PlaceholderRenderPass PlaceholderRenderPass(device);
-            wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&PlaceholderRenderPass);
-            pass.SetPipeline(rp);
-
-            pass.SetIndexBuffer(buffer, wgpu::IndexFormat::Uint32);
-            pass.Draw(3);
-
-            pass.SetBindGroup(0, bg);
-            pass.Draw(3);
-
-            pass.End();
-            ASSERT_DEVICE_ERROR(encoder.Finish());
-        }
-
-        // test compute pass
-        {
-            // Create a buffer and bind groups
-            wgpu::Buffer buffer = CreateBuffer(4, wgpu::BufferUsage::Storage);
-
-            wgpu::BindGroupLayout bgl0 = utils::MakeBindGroupLayout(
-                device,
-                {{0, wgpu::ShaderStage::Compute, wgpu::BufferBindingType::ReadOnlyStorage}});
-            wgpu::BindGroupLayout bgl1 = utils::MakeBindGroupLayout(
-                device, {{0, wgpu::ShaderStage::Compute, wgpu::BufferBindingType::Storage}});
-            wgpu::BindGroup bg0 = utils::MakeBindGroup(device, bgl0, {{0, buffer}});
-            wgpu::BindGroup bg1 = utils::MakeBindGroup(device, bgl1, {{0, buffer}});
-
-            // Create a no-op compute pipeline.
-            wgpu::ComputePipeline cp0 = CreateNoOpComputePipeline({bgl0});
-            wgpu::ComputePipeline cp1 = CreateNoOpComputePipeline({bgl1});
-
-            // It is valid to use the same buffer as both readable and writable in different
-            // dispatches within the same compute pass.
-            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-            wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
-
-            pass.SetPipeline(cp0);
-            pass.SetBindGroup(0, bg0);
-            pass.DispatchWorkgroups(1);
-
-            pass.SetPipeline(cp1);
-            pass.SetBindGroup(0, bg1);
-            pass.DispatchWorkgroups(1);
-
-            pass.End();
-            encoder.Finish();
-        }
-    }
-
-    // Test that it is invalid to use the same buffer as both readable and writable in a single
-    // draw or dispatch.
-    TEST_F(ResourceUsageTrackingTest, BufferWithReadAndWriteUsageInSingleDrawOrDispatch) {
-        // Test render pass
-        {
-            // Create a buffer and a bind group
-            wgpu::Buffer buffer =
-                CreateBuffer(4, wgpu::BufferUsage::Storage | wgpu::BufferUsage::Index);
-            wgpu::BindGroupLayout writeBGL = utils::MakeBindGroupLayout(
-                device, {{0, wgpu::ShaderStage::Fragment, wgpu::BufferBindingType::Storage}});
-            wgpu::BindGroup writeBG = utils::MakeBindGroup(device, writeBGL, {{0, buffer}});
-
-            // Create a no-op render pipeline.
-            wgpu::RenderPipeline rp = CreateNoOpRenderPipeline();
-
-            // It is invalid to use the same buffer as both readable and writable usages in a single
-            // draw
-            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-            PlaceholderRenderPass PlaceholderRenderPass(device);
-            wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&PlaceholderRenderPass);
-            pass.SetPipeline(rp);
-
-            pass.SetIndexBuffer(buffer, wgpu::IndexFormat::Uint32);
-            pass.SetBindGroup(0, writeBG);
-            pass.Draw(3);
-
-            pass.End();
-            ASSERT_DEVICE_ERROR(encoder.Finish());
-        }
-
-        // test compute pass
-        {
-            // Create a buffer and bind groups
-            wgpu::Buffer buffer = CreateBuffer(4, wgpu::BufferUsage::Storage);
-
-            wgpu::BindGroupLayout readBGL = utils::MakeBindGroupLayout(
-                device,
-                {{0, wgpu::ShaderStage::Compute, wgpu::BufferBindingType::ReadOnlyStorage}});
-            wgpu::BindGroupLayout writeBGL = utils::MakeBindGroupLayout(
-                device, {{0, wgpu::ShaderStage::Compute, wgpu::BufferBindingType::Storage}});
-            wgpu::BindGroup readBG = utils::MakeBindGroup(device, readBGL, {{0, buffer}});
-            wgpu::BindGroup writeBG = utils::MakeBindGroup(device, writeBGL, {{0, buffer}});
-
-            // Create a no-op compute pipeline.
-            wgpu::ComputePipeline cp = CreateNoOpComputePipeline({readBGL, writeBGL});
-
-            // It is invalid to use the same buffer as both readable and writable usages in a single
-            // dispatch
-            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-            wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
-            pass.SetPipeline(cp);
-
-            pass.SetBindGroup(0, readBG);
-            pass.SetBindGroup(1, writeBG);
-            pass.DispatchWorkgroups(1);
-
-            pass.End();
             ASSERT_DEVICE_ERROR(encoder.Finish());
         }
     }
+}
 
-    // Test that using the same buffer as copy src/dst and writable/readable usage is allowed.
-    TEST_F(ResourceUsageTrackingTest, BufferCopyAndBufferUsageInPass) {
-        // Create buffers that will be used as both a copy src/dst buffer and a storage buffer
-        wgpu::Buffer bufferSrc =
-            CreateBuffer(4, wgpu::BufferUsage::Storage | wgpu::BufferUsage::CopySrc);
-        wgpu::Buffer bufferDst =
-            CreateBuffer(4, wgpu::BufferUsage::Storage | wgpu::BufferUsage::CopyDst);
+// Test the use of a buffer as a storage buffer multiple times in the same synchronization
+// scope.
+TEST_F(ResourceUsageTrackingTest, BufferUsedAsStorageMultipleTimes) {
+    // Create buffer and bind group
+    wgpu::Buffer buffer = CreateBuffer(512, wgpu::BufferUsage::Storage);
 
-        // Create the bind group to use the buffer as storage
-        wgpu::BindGroupLayout bgl0 = utils::MakeBindGroupLayout(
-            device, {{0, wgpu::ShaderStage::Fragment, wgpu::BufferBindingType::Storage}});
-        wgpu::BindGroup bg0 = utils::MakeBindGroup(device, bgl0, {{0, bufferSrc}});
-        wgpu::BindGroupLayout bgl1 = utils::MakeBindGroupLayout(
-            device, {{0, wgpu::ShaderStage::Compute, wgpu::BufferBindingType::ReadOnlyStorage}});
-        wgpu::BindGroup bg1 = utils::MakeBindGroup(device, bgl1, {{0, bufferDst}});
+    wgpu::BindGroupLayout bgl = utils::MakeBindGroupLayout(
+        device, {{0, wgpu::ShaderStage::Fragment | wgpu::ShaderStage::Compute,
+                  wgpu::BufferBindingType::Storage},
+                 {1, wgpu::ShaderStage::Fragment | wgpu::ShaderStage::Compute,
+                  wgpu::BufferBindingType::Storage}});
+    wgpu::BindGroup bg =
+        utils::MakeBindGroup(device, bgl, {{0, buffer, 0, 4}, {1, buffer, 256, 4}});
 
-        // Use the buffer as both copy src and storage in render pass
-        {
-            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-            encoder.CopyBufferToBuffer(bufferSrc, 0, bufferDst, 0, 4);
-            PlaceholderRenderPass PlaceholderRenderPass(device);
-            wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&PlaceholderRenderPass);
-            pass.SetBindGroup(0, bg0);
-            pass.End();
-            encoder.Finish();
-        }
-
-        // Use the buffer as both copy dst and readonly storage in compute pass
-        {
-            wgpu::ComputePipeline cp = CreateNoOpComputePipeline({bgl1});
-
-            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-            encoder.CopyBufferToBuffer(bufferSrc, 0, bufferDst, 0, 4);
-
-            wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
-            pass.SetBindGroup(0, bg1);
-            pass.SetPipeline(cp);
-            pass.DispatchWorkgroups(1);
-            pass.End();
-
-            encoder.Finish();
-        }
+    // test render pass
+    {
+        // It is valid to use multiple storage usages on the same buffer in render pass
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        PlaceholderRenderPass PlaceholderRenderPass(device);
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&PlaceholderRenderPass);
+        pass.SetBindGroup(0, bg);
+        pass.End();
+        encoder.Finish();
     }
 
-    // Test that all index buffers and vertex buffers take effect even though some buffers are
-    // not used because they are overwritten by another consecutive call.
-    TEST_F(ResourceUsageTrackingTest, BufferWithMultipleSetIndexOrVertexBuffer) {
-        // Create buffers that will be used as both vertex and index buffer.
-        wgpu::Buffer buffer0 = CreateBuffer(
-            4, wgpu::BufferUsage::Vertex | wgpu::BufferUsage::Index | wgpu::BufferUsage::Storage);
+    // test compute pass
+    {
+        // It is valid to use multiple storage usages on the same buffer in a dispatch
+        wgpu::ComputePipeline cp = CreateNoOpComputePipeline({bgl});
+
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
+        pass.SetPipeline(cp);
+        pass.SetBindGroup(0, bg);
+        pass.DispatchWorkgroups(1);
+        pass.End();
+        encoder.Finish();
+    }
+}
+
+// Test that using the same buffer as both readable and writable in different passes is allowed
+TEST_F(ResourceUsageTrackingTest, BufferWithReadAndWriteUsageInDifferentPasses) {
+    // Test render pass
+    {
+        // Create buffers that will be used as index and storage buffers
+        wgpu::Buffer buffer0 =
+            CreateBuffer(4, wgpu::BufferUsage::Storage | wgpu::BufferUsage::Index);
         wgpu::Buffer buffer1 =
-            CreateBuffer(4, wgpu::BufferUsage::Vertex | wgpu::BufferUsage::Index);
+            CreateBuffer(4, wgpu::BufferUsage::Storage | wgpu::BufferUsage::Index);
 
+        // Create bind groups to use the buffer as storage
         wgpu::BindGroupLayout bgl = utils::MakeBindGroupLayout(
             device, {{0, wgpu::ShaderStage::Fragment, wgpu::BufferBindingType::Storage}});
-        wgpu::BindGroup bg = utils::MakeBindGroup(device, bgl, {{0, buffer0}});
+        wgpu::BindGroup bg0 = utils::MakeBindGroup(device, bgl, {{0, buffer0}});
+        wgpu::BindGroup bg1 = utils::MakeBindGroup(device, bgl, {{0, buffer1}});
+
+        // Use these two buffers as both index and storage in different render passes
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        PlaceholderRenderPass PlaceholderRenderPass(device);
+
+        wgpu::RenderPassEncoder pass0 = encoder.BeginRenderPass(&PlaceholderRenderPass);
+        pass0.SetIndexBuffer(buffer0, wgpu::IndexFormat::Uint32);
+        pass0.SetBindGroup(0, bg1);
+        pass0.End();
+
+        wgpu::RenderPassEncoder pass1 = encoder.BeginRenderPass(&PlaceholderRenderPass);
+        pass1.SetIndexBuffer(buffer1, wgpu::IndexFormat::Uint32);
+        pass1.SetBindGroup(0, bg0);
+        pass1.End();
+
+        encoder.Finish();
+    }
+
+    // Test compute pass
+    {
+        // Create buffer and bind groups that will be used as storage and uniform bindings
+        wgpu::Buffer buffer =
+            CreateBuffer(4, wgpu::BufferUsage::Storage | wgpu::BufferUsage::Uniform);
+
+        wgpu::BindGroupLayout bgl0 = utils::MakeBindGroupLayout(
+            device, {{0, wgpu::ShaderStage::Compute, wgpu::BufferBindingType::Storage}});
+        wgpu::BindGroupLayout bgl1 = utils::MakeBindGroupLayout(
+            device, {{0, wgpu::ShaderStage::Compute, wgpu::BufferBindingType::Uniform}});
+        wgpu::BindGroup bg0 = utils::MakeBindGroup(device, bgl0, {{0, buffer}});
+        wgpu::BindGroup bg1 = utils::MakeBindGroup(device, bgl1, {{0, buffer}});
+
+        // Use the buffer as both storage and uniform in different compute passes
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+
+        wgpu::ComputePassEncoder pass0 = encoder.BeginComputePass();
+        pass0.SetBindGroup(0, bg0);
+        pass0.End();
+
+        wgpu::ComputePassEncoder pass1 = encoder.BeginComputePass();
+        pass1.SetBindGroup(1, bg1);
+        pass1.End();
+
+        encoder.Finish();
+    }
+
+    // Test render pass and compute pass mixed together with resource dependency.
+    {
+        // Create buffer and bind groups that will be used as storage and read-only storage bindings
+        wgpu::Buffer buffer = CreateBuffer(4, wgpu::BufferUsage::Storage);
+
+        wgpu::BindGroupLayout bgl0 = utils::MakeBindGroupLayout(
+            device, {{0, wgpu::ShaderStage::Compute, wgpu::BufferBindingType::Storage}});
+        wgpu::BindGroupLayout bgl1 = utils::MakeBindGroupLayout(
+            device, {{0, wgpu::ShaderStage::Fragment, wgpu::BufferBindingType::ReadOnlyStorage}});
+        wgpu::BindGroup bg0 = utils::MakeBindGroup(device, bgl0, {{0, buffer}});
+        wgpu::BindGroup bg1 = utils::MakeBindGroup(device, bgl1, {{0, buffer}});
+
+        // Use the buffer as storage and read-only storage in compute pass and render pass respectively
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+
+        wgpu::ComputePassEncoder pass0 = encoder.BeginComputePass();
+        pass0.SetBindGroup(0, bg0);
+        pass0.End();
+
+        PlaceholderRenderPass PlaceholderRenderPass(device);
+        wgpu::RenderPassEncoder pass1 = encoder.BeginRenderPass(&PlaceholderRenderPass);
+        pass1.SetBindGroup(1, bg1);
+        pass1.End();
+
+        encoder.Finish();
+    }
+}
+
+// Test that it is invalid to use the same buffer as both readable and writable in different
+// draws in a single render pass. But it is valid in different dispatches in a single compute
+// pass.
+TEST_F(ResourceUsageTrackingTest, BufferWithReadAndWriteUsageInDifferentDrawsOrDispatches) {
+    // Test render pass
+    {
+        // Create a buffer and a bind group
+        wgpu::Buffer buffer =
+            CreateBuffer(4, wgpu::BufferUsage::Storage | wgpu::BufferUsage::Index);
+        wgpu::BindGroupLayout bgl = utils::MakeBindGroupLayout(
+            device, {{0, wgpu::ShaderStage::Fragment, wgpu::BufferBindingType::Storage}});
+        wgpu::BindGroup bg = utils::MakeBindGroup(device, bgl, {{0, buffer}});
+
+        // Create a no-op render pipeline.
+        wgpu::RenderPipeline rp = CreateNoOpRenderPipeline();
+
+        // It is not allowed to use the same buffer as both readable and writable in different
+        // draws within the same render pass.
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        PlaceholderRenderPass PlaceholderRenderPass(device);
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&PlaceholderRenderPass);
+        pass.SetPipeline(rp);
+
+        pass.SetIndexBuffer(buffer, wgpu::IndexFormat::Uint32);
+        pass.Draw(3);
+
+        pass.SetBindGroup(0, bg);
+        pass.Draw(3);
+
+        pass.End();
+        ASSERT_DEVICE_ERROR(encoder.Finish());
+    }
+
+    // Test compute pass
+    {
+        // Create a buffer and bind groups
+        wgpu::Buffer buffer = CreateBuffer(4, wgpu::BufferUsage::Storage);
+
+        wgpu::BindGroupLayout bgl0 = utils::MakeBindGroupLayout(
+            device, {{0, wgpu::ShaderStage::Compute, wgpu::BufferBindingType::ReadOnlyStorage}});
+        wgpu::BindGroupLayout bgl1 = utils::MakeBindGroupLayout(
+            device, {{0, wgpu::ShaderStage::Compute, wgpu::BufferBindingType::Storage}});
+        wgpu::BindGroup bg0 = utils::MakeBindGroup(device, bgl0, {{0, buffer}});
+        wgpu::BindGroup bg1 = utils::MakeBindGroup(device, bgl1, {{0, buffer}});
+
+        // Create a no-op compute pipeline.
+        wgpu::ComputePipeline cp0 = CreateNoOpComputePipeline({bgl0});
+        wgpu::ComputePipeline cp1 = CreateNoOpComputePipeline({bgl1});
+
+        // It is valid to use the same buffer as both readable and writable in different
+        // dispatches within the same compute pass.
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
+
+        pass.SetPipeline(cp0);
+        pass.SetBindGroup(0, bg0);
+        pass.DispatchWorkgroups(1);
+
+        pass.SetPipeline(cp1);
+        pass.SetBindGroup(0, bg1);
+        pass.DispatchWorkgroups(1);
+
+        pass.End();
+        encoder.Finish();
+    }
+}
+
+// Test that it is invalid to use the same buffer as both readable and writable in a single
+// draw or dispatch.
+TEST_F(ResourceUsageTrackingTest, BufferWithReadAndWriteUsageInSingleDrawOrDispatch) {
+    // Test render pass
+    {
+        // Create a buffer and a bind group
+        wgpu::Buffer buffer =
+            CreateBuffer(4, wgpu::BufferUsage::Storage | wgpu::BufferUsage::Index);
+        wgpu::BindGroupLayout writeBGL = utils::MakeBindGroupLayout(
+            device, {{0, wgpu::ShaderStage::Fragment, wgpu::BufferBindingType::Storage}});
+        wgpu::BindGroup writeBG = utils::MakeBindGroup(device, writeBGL, {{0, buffer}});
+
+        // Create a no-op render pipeline.
+        wgpu::RenderPipeline rp = CreateNoOpRenderPipeline();
+
+        // It is invalid to use the same buffer as both readable and writable usages in a single
+        // draw
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        PlaceholderRenderPass PlaceholderRenderPass(device);
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&PlaceholderRenderPass);
+        pass.SetPipeline(rp);
+
+        pass.SetIndexBuffer(buffer, wgpu::IndexFormat::Uint32);
+        pass.SetBindGroup(0, writeBG);
+        pass.Draw(3);
+
+        pass.End();
+        ASSERT_DEVICE_ERROR(encoder.Finish());
+    }
+
+    // Test compute pass
+    {
+        // Create a buffer and bind groups
+        wgpu::Buffer buffer = CreateBuffer(4, wgpu::BufferUsage::Storage);
+
+        wgpu::BindGroupLayout readBGL = utils::MakeBindGroupLayout(
+            device, {{0, wgpu::ShaderStage::Compute, wgpu::BufferBindingType::ReadOnlyStorage}});
+        wgpu::BindGroupLayout writeBGL = utils::MakeBindGroupLayout(
+            device, {{0, wgpu::ShaderStage::Compute, wgpu::BufferBindingType::Storage}});
+        wgpu::BindGroup readBG = utils::MakeBindGroup(device, readBGL, {{0, buffer}});
+        wgpu::BindGroup writeBG = utils::MakeBindGroup(device, writeBGL, {{0, buffer}});
+
+        // Create a no-op compute pipeline.
+        wgpu::ComputePipeline cp = CreateNoOpComputePipeline({readBGL, writeBGL});
+
+        // It is invalid to use the same buffer as both readable and writable usages in a single
+        // dispatch
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
+        pass.SetPipeline(cp);
+
+        pass.SetBindGroup(0, readBG);
+        pass.SetBindGroup(1, writeBG);
+        pass.DispatchWorkgroups(1);
+
+        pass.End();
+        ASSERT_DEVICE_ERROR(encoder.Finish());
+    }
+}
+
+// Test that using the same buffer as copy src/dst and writable/readable usage is allowed.
+TEST_F(ResourceUsageTrackingTest, BufferCopyAndBufferUsageInPass) {
+    // Create buffers that will be used as both a copy src/dst buffer and a storage buffer
+    wgpu::Buffer bufferSrc =
+        CreateBuffer(4, wgpu::BufferUsage::Storage | wgpu::BufferUsage::CopySrc);
+    wgpu::Buffer bufferDst =
+        CreateBuffer(4, wgpu::BufferUsage::Storage | wgpu::BufferUsage::CopyDst);
+
+    // Create the bind group to use the buffer as storage
+    wgpu::BindGroupLayout bgl0 = utils::MakeBindGroupLayout(
+        device, {{0, wgpu::ShaderStage::Fragment, wgpu::BufferBindingType::Storage}});
+    wgpu::BindGroup bg0 = utils::MakeBindGroup(device, bgl0, {{0, bufferSrc}});
+    wgpu::BindGroupLayout bgl1 = utils::MakeBindGroupLayout(
+        device, {{0, wgpu::ShaderStage::Compute, wgpu::BufferBindingType::ReadOnlyStorage}});
+    wgpu::BindGroup bg1 = utils::MakeBindGroup(device, bgl1, {{0, bufferDst}});
+
+    // Use the buffer as both copy src and storage in render pass
+    {
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        encoder.CopyBufferToBuffer(bufferSrc, 0, bufferDst, 0, 4);
+        PlaceholderRenderPass PlaceholderRenderPass(device);
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&PlaceholderRenderPass);
+        pass.SetBindGroup(0, bg0);
+        pass.End();
+        encoder.Finish();
+    }
+
+    // Use the buffer as both copy dst and readonly storage in compute pass
+    {
+        wgpu::ComputePipeline cp = CreateNoOpComputePipeline({bgl1});
+
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        encoder.CopyBufferToBuffer(bufferSrc, 0, bufferDst, 0, 4);
+
+        wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
+        pass.SetBindGroup(0, bg1);
+        pass.SetPipeline(cp);
+        pass.DispatchWorkgroups(1);
+        pass.End();
+
+        encoder.Finish();
+    }
+}
+
+// Test that all index buffers and vertex buffers take effect even though some buffers are
+// not used because they are overwritten by another consecutive call.
+TEST_F(ResourceUsageTrackingTest, BufferWithMultipleSetIndexOrVertexBuffer) {
+    // Create buffers that will be used as both vertex and index buffer.
+    wgpu::Buffer buffer0 = CreateBuffer(
+        4, wgpu::BufferUsage::Vertex | wgpu::BufferUsage::Index | wgpu::BufferUsage::Storage);
+    wgpu::Buffer buffer1 = CreateBuffer(4, wgpu::BufferUsage::Vertex | wgpu::BufferUsage::Index);
+
+    wgpu::BindGroupLayout bgl = utils::MakeBindGroupLayout(
+        device, {{0, wgpu::ShaderStage::Fragment, wgpu::BufferBindingType::Storage}});
+    wgpu::BindGroup bg = utils::MakeBindGroup(device, bgl, {{0, buffer0}});
+
+    PlaceholderRenderPass PlaceholderRenderPass(device);
+
+    // Set index buffer twice. The second one overwrites the first one. No buffer is used as
+    // both read and write in the same pass. But the overwritten index buffer (buffer0) still
+    // takes effect during resource tracking.
+    {
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&PlaceholderRenderPass);
+        pass.SetIndexBuffer(buffer0, wgpu::IndexFormat::Uint32);
+        pass.SetIndexBuffer(buffer1, wgpu::IndexFormat::Uint32);
+        pass.SetBindGroup(0, bg);
+        pass.End();
+        ASSERT_DEVICE_ERROR(encoder.Finish());
+    }
+
+    // Set index buffer twice. The second one overwrites the first one. buffer0 is used as both
+    // read and write in the same pass
+    {
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&PlaceholderRenderPass);
+        pass.SetIndexBuffer(buffer1, wgpu::IndexFormat::Uint32);
+        pass.SetIndexBuffer(buffer0, wgpu::IndexFormat::Uint32);
+        pass.SetBindGroup(0, bg);
+        pass.End();
+        ASSERT_DEVICE_ERROR(encoder.Finish());
+    }
+
+    // Set vertex buffer on the same index twice. The second one overwrites the first one. No
+    // buffer is used as both read and write in the same pass. But the overwritten vertex buffer
+    // (buffer0) still takes effect during resource tracking.
+    {
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&PlaceholderRenderPass);
+        pass.SetVertexBuffer(0, buffer0);
+        pass.SetVertexBuffer(0, buffer1);
+        pass.SetBindGroup(0, bg);
+        pass.End();
+        ASSERT_DEVICE_ERROR(encoder.Finish());
+    }
+
+    // Set vertex buffer on the same index twice. The second one overwrites the first one.
+    // buffer0 is used as both read and write in the same pass
+    {
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&PlaceholderRenderPass);
+        pass.SetVertexBuffer(0, buffer1);
+        pass.SetVertexBuffer(0, buffer0);
+        pass.SetBindGroup(0, bg);
+        pass.End();
+        ASSERT_DEVICE_ERROR(encoder.Finish());
+    }
+}
+
+// Test that all consecutive SetBindGroup()s take effect even though some bind groups are not
+// used because they are overwritten by a consecutive call.
+TEST_F(ResourceUsageTrackingTest, BufferWithMultipleSetBindGroupsOnSameIndex) {
+    // Test render pass
+    {
+        // Create buffers that will be used as index and storage buffers
+        wgpu::Buffer buffer0 =
+            CreateBuffer(4, wgpu::BufferUsage::Storage | wgpu::BufferUsage::Index);
+        wgpu::Buffer buffer1 =
+            CreateBuffer(4, wgpu::BufferUsage::Storage | wgpu::BufferUsage::Index);
+
+        // Create the bind group to use the buffer as storage
+        wgpu::BindGroupLayout bgl = utils::MakeBindGroupLayout(
+            device, {{0, wgpu::ShaderStage::Fragment, wgpu::BufferBindingType::Storage}});
+        wgpu::BindGroup bg0 = utils::MakeBindGroup(device, bgl, {{0, buffer0}});
+        wgpu::BindGroup bg1 = utils::MakeBindGroup(device, bgl, {{0, buffer1}});
 
         PlaceholderRenderPass PlaceholderRenderPass(device);
 
-        // Set index buffer twice. The second one overwrites the first one. No buffer is used as
-        // both read and write in the same pass. But the overwritten index buffer (buffer0) still
-        // take effect during resource tracking.
+        // Set bind group on the same index twice. The second one overwrites the first one.
+        // No buffer is used as both read and write in the same pass. But the overwritten
+        // bind group still takes effect during resource tracking.
         {
             wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
             wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&PlaceholderRenderPass);
             pass.SetIndexBuffer(buffer0, wgpu::IndexFormat::Uint32);
-            pass.SetIndexBuffer(buffer1, wgpu::IndexFormat::Uint32);
-            pass.SetBindGroup(0, bg);
+            pass.SetBindGroup(0, bg0);
+            pass.SetBindGroup(0, bg1);
             pass.End();
             ASSERT_DEVICE_ERROR(encoder.Finish());
         }
 
-        // Set index buffer twice. The second one overwrites the first one. buffer0 is used as both
-        // read and write in the same pass
-        {
-            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-            wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&PlaceholderRenderPass);
-            pass.SetIndexBuffer(buffer1, wgpu::IndexFormat::Uint32);
-            pass.SetIndexBuffer(buffer0, wgpu::IndexFormat::Uint32);
-            pass.SetBindGroup(0, bg);
-            pass.End();
-            ASSERT_DEVICE_ERROR(encoder.Finish());
-        }
-
-        // Set vertex buffer on the same index twice. The second one overwrites the first one. No
-        // buffer is used as both read and write in the same pass. But the overwritten vertex buffer
-        // (buffer0) still take effect during resource tracking.
-        {
-            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-            wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&PlaceholderRenderPass);
-            pass.SetVertexBuffer(0, buffer0);
-            pass.SetVertexBuffer(0, buffer1);
-            pass.SetBindGroup(0, bg);
-            pass.End();
-            ASSERT_DEVICE_ERROR(encoder.Finish());
-        }
-
-        // Set vertex buffer on the same index twice. The second one overwrites the first one.
+        // Set bind group on the same index twice. The second one overwrites the first one.
         // buffer0 is used as both read and write in the same pass
         {
             wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
             wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&PlaceholderRenderPass);
-            pass.SetVertexBuffer(0, buffer1);
-            pass.SetVertexBuffer(0, buffer0);
-            pass.SetBindGroup(0, bg);
+            pass.SetIndexBuffer(buffer0, wgpu::IndexFormat::Uint32);
+            pass.SetBindGroup(0, bg1);
+            pass.SetBindGroup(0, bg0);
             pass.End();
             ASSERT_DEVICE_ERROR(encoder.Finish());
         }
     }
 
-    // Test that all consecutive SetBindGroup()s take effect even though some bind groups are not
-    // used because they are overwritten by a consecutive call.
-    TEST_F(ResourceUsageTrackingTest, BufferWithMultipleSetBindGroupsOnSameIndex) {
-        // test render pass
+    // Test compute pass
+    {
+        // Create buffers that will be used as readonly and writable storage buffers
+        wgpu::Buffer buffer0 = CreateBuffer(512, wgpu::BufferUsage::Storage);
+        wgpu::Buffer buffer1 = CreateBuffer(4, wgpu::BufferUsage::Storage);
+
+        // Create the bind group to use the buffer as storage
+        wgpu::BindGroupLayout writeBGL = utils::MakeBindGroupLayout(
+            device, {{0, wgpu::ShaderStage::Compute, wgpu::BufferBindingType::Storage}});
+        wgpu::BindGroupLayout readBGL = utils::MakeBindGroupLayout(
+            device, {{0, wgpu::ShaderStage::Compute, wgpu::BufferBindingType::ReadOnlyStorage}});
+        wgpu::BindGroup writeBG0 = utils::MakeBindGroup(device, writeBGL, {{0, buffer0, 0, 4}});
+        wgpu::BindGroup readBG0 = utils::MakeBindGroup(device, readBGL, {{0, buffer0, 256, 4}});
+        wgpu::BindGroup readBG1 = utils::MakeBindGroup(device, readBGL, {{0, buffer1, 0, 4}});
+
+        // Create a no-op compute pipeline.
+        wgpu::ComputePipeline cp = CreateNoOpComputePipeline({writeBGL, readBGL});
+
+        // Set bind group against the same index twice. The second one overwrites the first one.
+        // Then no buffer is used as both read and write in the same dispatch. But the
+        // overwritten bind group still takes effect.
         {
-            // Create buffers that will be used as index and storage buffers
-            wgpu::Buffer buffer0 =
-                CreateBuffer(4, wgpu::BufferUsage::Storage | wgpu::BufferUsage::Index);
-            wgpu::Buffer buffer1 =
-                CreateBuffer(4, wgpu::BufferUsage::Storage | wgpu::BufferUsage::Index);
-
-            // Create the bind group to use the buffer as storage
-            wgpu::BindGroupLayout bgl = utils::MakeBindGroupLayout(
-                device, {{0, wgpu::ShaderStage::Fragment, wgpu::BufferBindingType::Storage}});
-            wgpu::BindGroup bg0 = utils::MakeBindGroup(device, bgl, {{0, buffer0}});
-            wgpu::BindGroup bg1 = utils::MakeBindGroup(device, bgl, {{0, buffer1}});
-
-            PlaceholderRenderPass PlaceholderRenderPass(device);
-
-            // Set bind group on the same index twice. The second one overwrites the first one.
-            // No buffer is used as both read and write in the same pass. But the overwritten
-            // bind group still take effect during resource tracking.
-            {
-                wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-                wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&PlaceholderRenderPass);
-                pass.SetIndexBuffer(buffer0, wgpu::IndexFormat::Uint32);
-                pass.SetBindGroup(0, bg0);
-                pass.SetBindGroup(0, bg1);
-                pass.End();
-                ASSERT_DEVICE_ERROR(encoder.Finish());
-            }
-
-            // Set bind group on the same index twice. The second one overwrites the first one.
-            // buffer0 is used as both read and write in the same pass
-            {
-                wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-                wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&PlaceholderRenderPass);
-                pass.SetIndexBuffer(buffer0, wgpu::IndexFormat::Uint32);
-                pass.SetBindGroup(0, bg1);
-                pass.SetBindGroup(0, bg0);
-                pass.End();
-                ASSERT_DEVICE_ERROR(encoder.Finish());
-            }
-        }
-
-        // test compute pass
-        {
-            // Create buffers that will be used as readonly and writable storage buffers
-            wgpu::Buffer buffer0 = CreateBuffer(512, wgpu::BufferUsage::Storage);
-            wgpu::Buffer buffer1 = CreateBuffer(4, wgpu::BufferUsage::Storage);
-
-            // Create the bind group to use the buffer as storage
-            wgpu::BindGroupLayout writeBGL = utils::MakeBindGroupLayout(
-                device, {{0, wgpu::ShaderStage::Compute, wgpu::BufferBindingType::Storage}});
-            wgpu::BindGroupLayout readBGL = utils::MakeBindGroupLayout(
-                device,
-                {{0, wgpu::ShaderStage::Compute, wgpu::BufferBindingType::ReadOnlyStorage}});
-            wgpu::BindGroup writeBG0 = utils::MakeBindGroup(device, writeBGL, {{0, buffer0, 0, 4}});
-            wgpu::BindGroup readBG0 = utils::MakeBindGroup(device, readBGL, {{0, buffer0, 256, 4}});
-            wgpu::BindGroup readBG1 = utils::MakeBindGroup(device, readBGL, {{0, buffer1, 0, 4}});
-
-            // Create a no-op compute pipeline.
-            wgpu::ComputePipeline cp = CreateNoOpComputePipeline({writeBGL, readBGL});
-
-            // Set bind group against the same index twice. The second one overwrites the first one.
-            // Then no buffer is used as both read and write in the same dispatch. But the
-            // overwritten bind group still take effect.
-            {
-                wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-                wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
-                pass.SetBindGroup(0, writeBG0);
-                pass.SetBindGroup(1, readBG0);
-                pass.SetBindGroup(1, readBG1);
-                pass.SetPipeline(cp);
-                pass.DispatchWorkgroups(1);
-                pass.End();
-                encoder.Finish();
-            }
-
-            // Set bind group against the same index twice. The second one overwrites the first one.
-            // Then buffer0 is used as both read and write in the same dispatch
-            {
-                wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-                wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
-                pass.SetBindGroup(0, writeBG0);
-                pass.SetBindGroup(1, readBG1);
-                pass.SetBindGroup(1, readBG0);
-                pass.SetPipeline(cp);
-                pass.DispatchWorkgroups(1);
-                pass.End();
-                ASSERT_DEVICE_ERROR(encoder.Finish());
-            }
-        }
-    }
-
-    // Test that it is invalid to have resource usage conflicts even when all bindings are not
-    // visible to the programmable pass where it is used.
-    TEST_F(ResourceUsageTrackingTest, BufferUsageConflictBetweenInvisibleStagesInBindGroup) {
-        wgpu::Buffer buffer = CreateBuffer(4, wgpu::BufferUsage::Storage);
-
-        // Test render pass for bind group. The conflict of readonly storage and storage usage
-        // doesn't reside in render related stages at all
-        {
-            // Create a bind group whose bindings are not visible in render pass
-            wgpu::BindGroupLayout bgl = utils::MakeBindGroupLayout(
-                device, {{0, wgpu::ShaderStage::Compute, wgpu::BufferBindingType::Storage},
-                         {1, wgpu::ShaderStage::None, wgpu::BufferBindingType::ReadOnlyStorage}});
-            wgpu::BindGroup bg = utils::MakeBindGroup(device, bgl, {{0, buffer}, {1, buffer}});
-
-            // These two bindings are invisible in render pass. But we still track these bindings.
-            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-            PlaceholderRenderPass PlaceholderRenderPass(device);
-            wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&PlaceholderRenderPass);
-            pass.SetBindGroup(0, bg);
-            pass.End();
-            ASSERT_DEVICE_ERROR(encoder.Finish());
-        }
-
-        // Test compute pass for bind group. The conflict of readonly storage and storage usage
-        // doesn't reside in compute related stage at all
-        {
-            // Create a bind group whose bindings are not visible in compute pass
-            wgpu::BindGroupLayout bgl = utils::MakeBindGroupLayout(
-                device, {{0, wgpu::ShaderStage::Fragment, wgpu::BufferBindingType::ReadOnlyStorage},
-                         {1, wgpu::ShaderStage::None, wgpu::BufferBindingType::Storage}});
-            wgpu::BindGroup bg = utils::MakeBindGroup(device, bgl, {{0, buffer}, {1, buffer}});
-
-            // Create a no-op compute pipeline.
-            wgpu::ComputePipeline cp = CreateNoOpComputePipeline({bgl});
-
-            // These two bindings are invisible in the dispatch. But we still track these bindings.
             wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
             wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
+            pass.SetBindGroup(0, writeBG0);
+            pass.SetBindGroup(1, readBG0);
+            pass.SetBindGroup(1, readBG1);
             pass.SetPipeline(cp);
-            pass.SetBindGroup(0, bg);
+            pass.DispatchWorkgroups(1);
+            pass.End();
+            encoder.Finish();
+        }
+
+        // Set bind group against the same index twice. The second one overwrites the first one.
+        // Then buffer0 is used as both read and write in the same dispatch
+        {
+            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+            wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
+            pass.SetBindGroup(0, writeBG0);
+            pass.SetBindGroup(1, readBG1);
+            pass.SetBindGroup(1, readBG0);
+            pass.SetPipeline(cp);
             pass.DispatchWorkgroups(1);
             pass.End();
             ASSERT_DEVICE_ERROR(encoder.Finish());
         }
     }
+}
 
-    // Test that it is invalid to have resource usage conflicts even when one of the bindings is not
-    // visible to the programmable pass where it is used.
-    TEST_F(ResourceUsageTrackingTest, BufferUsageConflictWithInvisibleStageInBindGroup) {
-        // Test render pass for bind group and index buffer. The conflict of storage and index
-        // buffer usage resides between fragment stage and compute stage. But the compute stage
-        // binding is not visible in render pass.
-        {
-            wgpu::Buffer buffer =
-                CreateBuffer(4, wgpu::BufferUsage::Storage | wgpu::BufferUsage::Index);
-            wgpu::BindGroupLayout bgl = utils::MakeBindGroupLayout(
-                device, {{0, wgpu::ShaderStage::Compute, wgpu::BufferBindingType::Storage}});
-            wgpu::BindGroup bg = utils::MakeBindGroup(device, bgl, {{0, buffer}});
+// Test that it is invalid to have resource usage conflicts even when all bindings are not
+// visible to the programmable pass where it is used.
+TEST_F(ResourceUsageTrackingTest, BufferUsageConflictBetweenInvisibleStagesInBindGroup) {
+    wgpu::Buffer buffer = CreateBuffer(4, wgpu::BufferUsage::Storage);
 
-            // Buffer usage in compute stage in bind group conflicts with index buffer. And binding
-            // for compute stage is not visible in render pass. But we still track this binding.
-            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-            PlaceholderRenderPass PlaceholderRenderPass(device);
-            wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&PlaceholderRenderPass);
-            pass.SetIndexBuffer(buffer, wgpu::IndexFormat::Uint32);
-            pass.SetBindGroup(0, bg);
-            pass.End();
-            ASSERT_DEVICE_ERROR(encoder.Finish());
-        }
+    // Test render pass for bind group. The conflict of readonly storage and storage usage
+    // doesn't reside in render related stages at all
+    {
+        // Create a bind group whose bindings are not visible in render pass
+        wgpu::BindGroupLayout bgl = utils::MakeBindGroupLayout(
+            device, {{0, wgpu::ShaderStage::Compute, wgpu::BufferBindingType::Storage},
+                     {1, wgpu::ShaderStage::None, wgpu::BufferBindingType::ReadOnlyStorage}});
+        wgpu::BindGroup bg = utils::MakeBindGroup(device, bgl, {{0, buffer}, {1, buffer}});
 
-        // Test compute pass for bind group. The conflict of readonly storage and storage buffer
-        // usage resides between compute stage and fragment stage. But the fragment stage binding is
-        // not visible in the dispatch.
-        {
-            wgpu::Buffer buffer = CreateBuffer(4, wgpu::BufferUsage::Storage);
-            wgpu::BindGroupLayout bgl = utils::MakeBindGroupLayout(
-                device, {{0, wgpu::ShaderStage::Fragment, wgpu::BufferBindingType::ReadOnlyStorage},
-                         {1, wgpu::ShaderStage::Compute, wgpu::BufferBindingType::Storage}});
-            wgpu::BindGroup bg = utils::MakeBindGroup(device, bgl, {{0, buffer}, {1, buffer}});
-
-            // Create a no-op compute pipeline.
-            wgpu::ComputePipeline cp = CreateNoOpComputePipeline({bgl});
-
-            // Buffer usage in compute stage conflicts with buffer usage in fragment stage. And
-            // binding for fragment stage is not visible in the dispatch. But we still track this
-            // invisible binding.
-            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-            wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
-            pass.SetPipeline(cp);
-            pass.SetBindGroup(0, bg);
-            pass.DispatchWorkgroups(1);
-            pass.End();
-            ASSERT_DEVICE_ERROR(encoder.Finish());
-        }
+        // These two bindings are invisible in render pass. But we still track these bindings.
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        PlaceholderRenderPass PlaceholderRenderPass(device);
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&PlaceholderRenderPass);
+        pass.SetBindGroup(0, bg);
+        pass.End();
+        ASSERT_DEVICE_ERROR(encoder.Finish());
     }
 
-    // Test that it is invalid to have resource usage conflicts even when one of the bindings is not
-    // used in the pipeline.
-    TEST_F(ResourceUsageTrackingTest, BufferUsageConflictWithUnusedPipelineBindings) {
+    // Test compute pass for bind group. The conflict of readonly storage and storage usage
+    // doesn't reside in any compute-related stage at all.
+    {
+        // Create a bind group whose bindings are not visible in compute pass
+        wgpu::BindGroupLayout bgl = utils::MakeBindGroupLayout(
+            device, {{0, wgpu::ShaderStage::Fragment, wgpu::BufferBindingType::ReadOnlyStorage},
+                     {1, wgpu::ShaderStage::None, wgpu::BufferBindingType::Storage}});
+        wgpu::BindGroup bg = utils::MakeBindGroup(device, bgl, {{0, buffer}, {1, buffer}});
+
+        // Create a no-op compute pipeline.
+        wgpu::ComputePipeline cp = CreateNoOpComputePipeline({bgl});
+
+        // These two bindings are invisible in the dispatch. But we still track these bindings.
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
+        pass.SetPipeline(cp);
+        pass.SetBindGroup(0, bg);
+        pass.DispatchWorkgroups(1);
+        pass.End();
+        ASSERT_DEVICE_ERROR(encoder.Finish());
+    }
+}
+
+// Test that it is invalid to have resource usage conflicts even when one of the bindings is not
+// visible to the programmable pass where it is used.
+TEST_F(ResourceUsageTrackingTest, BufferUsageConflictWithInvisibleStageInBindGroup) {
+    // Test render pass for bind group and index buffer. The conflict of storage and index
+    // buffer usage resides between fragment stage and compute stage. But the compute stage
+    // binding is not visible in render pass.
+    {
+        wgpu::Buffer buffer =
+            CreateBuffer(4, wgpu::BufferUsage::Storage | wgpu::BufferUsage::Index);
+        wgpu::BindGroupLayout bgl = utils::MakeBindGroupLayout(
+            device, {{0, wgpu::ShaderStage::Compute, wgpu::BufferBindingType::Storage}});
+        wgpu::BindGroup bg = utils::MakeBindGroup(device, bgl, {{0, buffer}});
+
+        // Buffer usage in the compute stage in the bind group conflicts with the index buffer.
+        // The compute stage binding is not visible in the render pass, but we still track it.
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        PlaceholderRenderPass PlaceholderRenderPass(device);
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&PlaceholderRenderPass);
+        pass.SetIndexBuffer(buffer, wgpu::IndexFormat::Uint32);
+        pass.SetBindGroup(0, bg);
+        pass.End();
+        ASSERT_DEVICE_ERROR(encoder.Finish());
+    }
+
+    // Test compute pass for bind group. The conflict of readonly storage and storage buffer
+    // usage resides between compute stage and fragment stage. But the fragment stage binding is
+    // not visible in the dispatch.
+    {
         wgpu::Buffer buffer = CreateBuffer(4, wgpu::BufferUsage::Storage);
+        wgpu::BindGroupLayout bgl = utils::MakeBindGroupLayout(
+            device, {{0, wgpu::ShaderStage::Fragment, wgpu::BufferBindingType::ReadOnlyStorage},
+                     {1, wgpu::ShaderStage::Compute, wgpu::BufferBindingType::Storage}});
+        wgpu::BindGroup bg = utils::MakeBindGroup(device, bgl, {{0, buffer}, {1, buffer}});
 
-        // Test render pass for bind groups with unused bindings. The conflict of readonly storage
-        // and storage usages resides in different bind groups, although some bindings may not be
-        // used because its bind group layout is not designated in pipeline layout.
-        {
-            // Create bind groups. The bindings are visible for render pass.
-            wgpu::BindGroupLayout bgl0 = utils::MakeBindGroupLayout(
-                device,
-                {{0, wgpu::ShaderStage::Fragment, wgpu::BufferBindingType::ReadOnlyStorage}});
-            wgpu::BindGroupLayout bgl1 = utils::MakeBindGroupLayout(
-                device, {{0, wgpu::ShaderStage::Fragment, wgpu::BufferBindingType::Storage}});
-            wgpu::BindGroup bg0 = utils::MakeBindGroup(device, bgl0, {{0, buffer}});
-            wgpu::BindGroup bg1 = utils::MakeBindGroup(device, bgl1, {{0, buffer}});
+        // Create a no-op compute pipeline.
+        wgpu::ComputePipeline cp = CreateNoOpComputePipeline({bgl});
 
-            // Create a passthrough render pipeline with a readonly buffer
-            wgpu::ShaderModule vsModule = utils::CreateShaderModule(device, R"(
+        // Buffer usage in compute stage conflicts with buffer usage in fragment stage. And
+        // binding for fragment stage is not visible in the dispatch. But we still track this
+        // invisible binding.
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
+        pass.SetPipeline(cp);
+        pass.SetBindGroup(0, bg);
+        pass.DispatchWorkgroups(1);
+        pass.End();
+        ASSERT_DEVICE_ERROR(encoder.Finish());
+    }
+}
+
+// Test that it is invalid to have resource usage conflicts even when one of the bindings is not
+// used in the pipeline.
+TEST_F(ResourceUsageTrackingTest, BufferUsageConflictWithUnusedPipelineBindings) {
+    wgpu::Buffer buffer = CreateBuffer(4, wgpu::BufferUsage::Storage);
+
+    // Test render pass for bind groups with unused bindings. The conflict of readonly storage
+    // and storage usages resides in different bind groups, although some bindings may not be
+    // used because their bind group layouts are not designated in the pipeline layout.
+    {
+        // Create bind groups. The bindings are visible for render pass.
+        wgpu::BindGroupLayout bgl0 = utils::MakeBindGroupLayout(
+            device, {{0, wgpu::ShaderStage::Fragment, wgpu::BufferBindingType::ReadOnlyStorage}});
+        wgpu::BindGroupLayout bgl1 = utils::MakeBindGroupLayout(
+            device, {{0, wgpu::ShaderStage::Fragment, wgpu::BufferBindingType::Storage}});
+        wgpu::BindGroup bg0 = utils::MakeBindGroup(device, bgl0, {{0, buffer}});
+        wgpu::BindGroup bg1 = utils::MakeBindGroup(device, bgl1, {{0, buffer}});
+
+        // Create a passthrough render pipeline with a readonly buffer
+        wgpu::ShaderModule vsModule = utils::CreateShaderModule(device, R"(
                 @stage(vertex) fn main() -> @builtin(position) vec4<f32> {
                     return vec4<f32>();
                 })");
 
-            wgpu::ShaderModule fsModule = utils::CreateShaderModule(device, R"(
+        wgpu::ShaderModule fsModule = utils::CreateShaderModule(device, R"(
                 struct RBuffer {
                     value : f32
                 }
                 @group(0) @binding(0) var<storage, read> rBuffer : RBuffer;
                 @stage(fragment) fn main() {
                 })");
-            utils::ComboRenderPipelineDescriptor pipelineDescriptor;
-            pipelineDescriptor.vertex.module = vsModule;
-            pipelineDescriptor.cFragment.module = fsModule;
-            pipelineDescriptor.cTargets[0].writeMask = wgpu::ColorWriteMask::None;
-            pipelineDescriptor.layout = utils::MakeBasicPipelineLayout(device, &bgl0);
-            wgpu::RenderPipeline rp = device.CreateRenderPipeline(&pipelineDescriptor);
+        utils::ComboRenderPipelineDescriptor pipelineDescriptor;
+        pipelineDescriptor.vertex.module = vsModule;
+        pipelineDescriptor.cFragment.module = fsModule;
+        pipelineDescriptor.cTargets[0].writeMask = wgpu::ColorWriteMask::None;
+        pipelineDescriptor.layout = utils::MakeBasicPipelineLayout(device, &bgl0);
+        wgpu::RenderPipeline rp = device.CreateRenderPipeline(&pipelineDescriptor);
 
-            // Resource in bg1 conflicts with resources used in bg0. However, bindings in bg1 is
-            // not used in pipeline. But we still track this binding.
-            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-            PlaceholderRenderPass PlaceholderRenderPass(device);
-            wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&PlaceholderRenderPass);
-            pass.SetBindGroup(0, bg0);
-            pass.SetBindGroup(1, bg1);
-            pass.SetPipeline(rp);
-            pass.Draw(3);
-            pass.End();
-            ASSERT_DEVICE_ERROR(encoder.Finish());
-        }
-
-        // Test that an unused bind group is not used to detect conflicts between bindings in
-        // compute passes.
-        {
-            // Create bind groups. The bindings are visible for compute pass.
-            wgpu::BindGroupLayout bgl0 = utils::MakeBindGroupLayout(
-                device,
-                {{0, wgpu::ShaderStage::Compute, wgpu::BufferBindingType::ReadOnlyStorage}});
-            wgpu::BindGroupLayout bgl1 = utils::MakeBindGroupLayout(
-                device, {{0, wgpu::ShaderStage::Compute, wgpu::BufferBindingType::Storage}});
-            wgpu::BindGroup bg0 = utils::MakeBindGroup(device, bgl0, {{0, buffer}});
-            wgpu::BindGroup bg1 = utils::MakeBindGroup(device, bgl1, {{0, buffer}});
-
-            // Create a compute pipeline with only one of the two BGLs.
-            wgpu::ComputePipeline cp = CreateNoOpComputePipeline({bgl0});
-
-            // Resource in bg1 conflicts with resources used in bg0. However, the binding in bg1 is
-            // not used in pipeline so no error is produced in the dispatch.
-            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-            wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
-            pass.SetBindGroup(0, bg0);
-            pass.SetBindGroup(1, bg1);
-            pass.SetPipeline(cp);
-            pass.DispatchWorkgroups(1);
-            pass.End();
-            encoder.Finish();
-        }
+        // The resource in bg1 conflicts with resources used in bg0. However, the bindings in bg1
+        // are not used in the pipeline. But we still track these bindings.
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        PlaceholderRenderPass PlaceholderRenderPass(device);
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&PlaceholderRenderPass);
+        pass.SetBindGroup(0, bg0);
+        pass.SetBindGroup(1, bg1);
+        pass.SetPipeline(rp);
+        pass.Draw(3);
+        pass.End();
+        ASSERT_DEVICE_ERROR(encoder.Finish());
     }
 
-    // Test that it is invalid to use the same texture as both readable and writable in the same
-    // render pass. It is invalid in the same dispatch in compute pass.
-    TEST_F(ResourceUsageTrackingTest, TextureWithReadAndWriteUsage) {
-        // Test render pass
-        {
-            // Create a texture
-            wgpu::Texture texture = CreateTexture(wgpu::TextureUsage::TextureBinding |
-                                                  wgpu::TextureUsage::RenderAttachment);
-            wgpu::TextureView view = texture.CreateView();
+    // Test that an unused bind group is not used to detect conflicts between bindings in
+    // compute passes.
+    {
+        // Create bind groups. The bindings are visible for compute pass.
+        wgpu::BindGroupLayout bgl0 = utils::MakeBindGroupLayout(
+            device, {{0, wgpu::ShaderStage::Compute, wgpu::BufferBindingType::ReadOnlyStorage}});
+        wgpu::BindGroupLayout bgl1 = utils::MakeBindGroupLayout(
+            device, {{0, wgpu::ShaderStage::Compute, wgpu::BufferBindingType::Storage}});
+        wgpu::BindGroup bg0 = utils::MakeBindGroup(device, bgl0, {{0, buffer}});
+        wgpu::BindGroup bg1 = utils::MakeBindGroup(device, bgl1, {{0, buffer}});
 
-            // Create a bind group to use the texture as sampled binding
-            wgpu::BindGroupLayout bgl = utils::MakeBindGroupLayout(
-                device, {{0, wgpu::ShaderStage::Vertex, wgpu::TextureSampleType::Float}});
-            wgpu::BindGroup bg = utils::MakeBindGroup(device, bgl, {{0, view}});
+        // Create a compute pipeline with only one of the two BGLs.
+        wgpu::ComputePipeline cp = CreateNoOpComputePipeline({bgl0});
 
-            // Create a render pass to use the texture as a render target
-            utils::ComboRenderPassDescriptor renderPass({view});
-
-            // It is invalid to use the texture as both sampled and render target in the same pass
-            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-            wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass);
-            pass.SetBindGroup(0, bg);
-            pass.End();
-            ASSERT_DEVICE_ERROR(encoder.Finish());
-        }
-
-        // Test compute pass
-        {
-            // Create a texture
-            wgpu::Texture texture = CreateTexture(wgpu::TextureUsage::TextureBinding |
-                                                  wgpu::TextureUsage::StorageBinding);
-            wgpu::TextureView view = texture.CreateView();
-
-            // Create a bind group to use the texture as sampled and writeonly bindings
-            wgpu::BindGroupLayout bgl = utils::MakeBindGroupLayout(
-                device,
-                {{0, wgpu::ShaderStage::Compute, wgpu::TextureSampleType::Float},
-                 {1, wgpu::ShaderStage::Compute, wgpu::StorageTextureAccess::WriteOnly, kFormat}});
-            wgpu::BindGroup bg = utils::MakeBindGroup(device, bgl, {{0, view}, {1, view}});
-
-            // Create a no-op compute pipeline
-            wgpu::ComputePipeline cp = CreateNoOpComputePipeline({bgl});
-
-            // It is valid to use the texture as both sampled and writeonly storage in a single
-            // compute pass if dispatch command is not called.
-            {
-                wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-                wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
-                pass.SetBindGroup(0, bg);
-                pass.End();
-                encoder.Finish();
-            }
-
-            // It is invalid to use the texture as both sampled and writeonly storage in a single
-            // dispatch
-            {
-                wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-                wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
-                pass.SetPipeline(cp);
-                pass.SetBindGroup(0, bg);
-                pass.DispatchWorkgroups(1);
-                pass.End();
-                ASSERT_DEVICE_ERROR(encoder.Finish());
-            }
-        }
+        // The resource in bg1 conflicts with resources used in bg0. However, the binding in bg1 is
+        // not used in the pipeline, so no error is produced in the dispatch.
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
+        pass.SetBindGroup(0, bg0);
+        pass.SetBindGroup(1, bg1);
+        pass.SetPipeline(cp);
+        pass.DispatchWorkgroups(1);
+        pass.End();
+        encoder.Finish();
     }
+}
 
-    // Test that it is invalid to use the same texture as both readable and writable depth/stencil
-    // attachment in the same render pass. But it is valid to use it as both readable and readonly
-    // depth/stencil attachment in the same render pass.
-    // Note that depth/stencil attachment is a special render attachment, it can be readonly.
-    TEST_F(ResourceUsageTrackingTest, TextureWithSamplingAndDepthStencilAttachment) {
+// Test that it is invalid to use the same texture as both readable and writable in the same
+// render pass. It is invalid in the same dispatch in compute pass.
+TEST_F(ResourceUsageTrackingTest, TextureWithReadAndWriteUsage) {
+    // Test render pass
+    {
         // Create a texture
-        wgpu::Texture texture =
-            CreateTexture(wgpu::TextureUsage::TextureBinding | wgpu::TextureUsage::RenderAttachment,
-                          wgpu::TextureFormat::Depth32Float);
+        wgpu::Texture texture = CreateTexture(wgpu::TextureUsage::TextureBinding |
+                                              wgpu::TextureUsage::RenderAttachment);
         wgpu::TextureView view = texture.CreateView();
 
         // Create a bind group to use the texture as sampled binding
         wgpu::BindGroupLayout bgl = utils::MakeBindGroupLayout(
-            device, {{0, wgpu::ShaderStage::Fragment, wgpu::TextureSampleType::Depth}});
+            device, {{0, wgpu::ShaderStage::Vertex, wgpu::TextureSampleType::Float}});
         wgpu::BindGroup bg = utils::MakeBindGroup(device, bgl, {{0, view}});
 
         // Create a render pass to use the texture as a render target
-        utils::ComboRenderPassDescriptor passDescriptor({}, view);
-        passDescriptor.cDepthStencilAttachmentInfo.depthLoadOp = wgpu::LoadOp::Load;
-        passDescriptor.cDepthStencilAttachmentInfo.depthStoreOp = wgpu::StoreOp::Store;
-        passDescriptor.cDepthStencilAttachmentInfo.stencilLoadOp = wgpu::LoadOp::Undefined;
-        passDescriptor.cDepthStencilAttachmentInfo.stencilStoreOp = wgpu::StoreOp::Undefined;
+        utils::ComboRenderPassDescriptor renderPass({view});
 
-        // It is invalid to use the texture as both sampled and writeable depth/stencil attachment
-        // in the same pass
+        // It is invalid to use the texture as both sampled and render target in the same pass
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass);
+        pass.SetBindGroup(0, bg);
+        pass.End();
+        ASSERT_DEVICE_ERROR(encoder.Finish());
+    }
+
+    // Test compute pass
+    {
+        // Create a texture
+        wgpu::Texture texture =
+            CreateTexture(wgpu::TextureUsage::TextureBinding | wgpu::TextureUsage::StorageBinding);
+        wgpu::TextureView view = texture.CreateView();
+
+        // Create a bind group to use the texture as sampled and writeonly bindings
+        wgpu::BindGroupLayout bgl = utils::MakeBindGroupLayout(
+            device,
+            {{0, wgpu::ShaderStage::Compute, wgpu::TextureSampleType::Float},
+             {1, wgpu::ShaderStage::Compute, wgpu::StorageTextureAccess::WriteOnly, kFormat}});
+        wgpu::BindGroup bg = utils::MakeBindGroup(device, bgl, {{0, view}, {1, view}});
+
+        // Create a no-op compute pipeline
+        wgpu::ComputePipeline cp = CreateNoOpComputePipeline({bgl});
+
+        // It is valid to use the texture as both sampled and writeonly storage in a single
+        // compute pass if dispatch command is not called.
         {
             wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-            wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&passDescriptor);
+            wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
             pass.SetBindGroup(0, bg);
             pass.End();
+            encoder.Finish();
+        }
+
+        // It is invalid to use the texture as both sampled and writeonly storage in a single
+        // dispatch
+        {
+            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+            wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
+            pass.SetPipeline(cp);
+            pass.SetBindGroup(0, bg);
+            pass.DispatchWorkgroups(1);
+            pass.End();
             ASSERT_DEVICE_ERROR(encoder.Finish());
         }
+    }
+}
 
-        // It is valid to use the texture as both sampled and readonly depth/stencil attachment in
+// Test that it is invalid to use the same texture as both readable and writable depth/stencil
+// attachment in the same render pass. But it is valid to use it as both readable and readonly
+// depth/stencil attachment in the same render pass.
+// Note that depth/stencil attachment is a special render attachment, it can be readonly.
+TEST_F(ResourceUsageTrackingTest, TextureWithSamplingAndDepthStencilAttachment) {
+    // Create a texture
+    wgpu::Texture texture =
+        CreateTexture(wgpu::TextureUsage::TextureBinding | wgpu::TextureUsage::RenderAttachment,
+                      wgpu::TextureFormat::Depth32Float);
+    wgpu::TextureView view = texture.CreateView();
+
+    // Create a bind group to use the texture as sampled binding
+    wgpu::BindGroupLayout bgl = utils::MakeBindGroupLayout(
+        device, {{0, wgpu::ShaderStage::Fragment, wgpu::TextureSampleType::Depth}});
+    wgpu::BindGroup bg = utils::MakeBindGroup(device, bgl, {{0, view}});
+
+    // Create a render pass to use the texture as a render target
+    utils::ComboRenderPassDescriptor passDescriptor({}, view);
+    passDescriptor.cDepthStencilAttachmentInfo.depthLoadOp = wgpu::LoadOp::Load;
+    passDescriptor.cDepthStencilAttachmentInfo.depthStoreOp = wgpu::StoreOp::Store;
+    passDescriptor.cDepthStencilAttachmentInfo.stencilLoadOp = wgpu::LoadOp::Undefined;
+    passDescriptor.cDepthStencilAttachmentInfo.stencilStoreOp = wgpu::StoreOp::Undefined;
+
+    // It is invalid to use the texture as both sampled and writeable depth/stencil attachment
+    // in the same pass
+    {
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&passDescriptor);
+        pass.SetBindGroup(0, bg);
+        pass.End();
+        ASSERT_DEVICE_ERROR(encoder.Finish());
+    }
+
+    // It is valid to use the texture as both sampled and readonly depth/stencil attachment in
+    // the same pass
+    {
+        passDescriptor.cDepthStencilAttachmentInfo.depthReadOnly = true;
+        passDescriptor.cDepthStencilAttachmentInfo.depthLoadOp = wgpu::LoadOp::Undefined;
+        passDescriptor.cDepthStencilAttachmentInfo.depthStoreOp = wgpu::StoreOp::Undefined;
+
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&passDescriptor);
+        pass.SetBindGroup(0, bg);
+        pass.End();
+        encoder.Finish();
+    }
+}
+
+// Test using multiple writable usages on the same texture in a single pass/dispatch
+TEST_F(ResourceUsageTrackingTest, TextureWithMultipleWriteUsage) {
+    // Test render pass
+    {
+        // Create a texture
+        wgpu::Texture texture = CreateTexture(wgpu::TextureUsage::StorageBinding |
+                                              wgpu::TextureUsage::RenderAttachment);
+        wgpu::TextureView view = texture.CreateView();
+
+        // Create a bind group to use the texture as writeonly storage binding
+        wgpu::BindGroupLayout bgl = utils::MakeBindGroupLayout(
+            device,
+            {{0, wgpu::ShaderStage::Fragment, wgpu::StorageTextureAccess::WriteOnly, kFormat}});
+        wgpu::BindGroup bg = utils::MakeBindGroup(device, bgl, {{0, view}});
+
+        // It is invalid to use the texture as both writeonly storage and render target in
         // the same pass
         {
-            passDescriptor.cDepthStencilAttachmentInfo.depthReadOnly = true;
-            passDescriptor.cDepthStencilAttachmentInfo.depthLoadOp = wgpu::LoadOp::Undefined;
-            passDescriptor.cDepthStencilAttachmentInfo.depthStoreOp = wgpu::StoreOp::Undefined;
-
-            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-            wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&passDescriptor);
-            pass.SetBindGroup(0, bg);
-            pass.End();
-            encoder.Finish();
-        }
-    }
-
-    // Test using multiple writable usages on the same texture in a single pass/dispatch
-    TEST_F(ResourceUsageTrackingTest, TextureWithMultipleWriteUsage) {
-        // Test render pass
-        {
-            // Create a texture
-            wgpu::Texture texture = CreateTexture(wgpu::TextureUsage::StorageBinding |
-                                                  wgpu::TextureUsage::RenderAttachment);
-            wgpu::TextureView view = texture.CreateView();
-
-            // Create a bind group to use the texture as writeonly storage binding
-            wgpu::BindGroupLayout bgl = utils::MakeBindGroupLayout(
-                device,
-                {{0, wgpu::ShaderStage::Fragment, wgpu::StorageTextureAccess::WriteOnly, kFormat}});
-            wgpu::BindGroup bg = utils::MakeBindGroup(device, bgl, {{0, view}});
-
-            // It is invalid to use the texture as both writeonly storage and render target in
-            // the same pass
-            {
-                utils::ComboRenderPassDescriptor renderPass({view});
-
-                wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-                wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass);
-                pass.SetBindGroup(0, bg);
-                pass.End();
-                ASSERT_DEVICE_ERROR(encoder.Finish());
-            }
-
-            // It is valid to use multiple writeonly storage usages on the same texture in render
-            // pass
-            {
-                wgpu::BindGroup bg1 = utils::MakeBindGroup(device, bgl, {{0, view}});
-
-                wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-                PlaceholderRenderPass PlaceholderRenderPass(device);
-                wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&PlaceholderRenderPass);
-                pass.SetBindGroup(0, bg);
-                pass.SetBindGroup(1, bg1);
-                pass.End();
-                encoder.Finish();
-            }
-        }
-
-        // Test compute pass
-        {
-            // Create a texture
-            wgpu::Texture texture = CreateTexture(wgpu::TextureUsage::StorageBinding);
-            wgpu::TextureView view = texture.CreateView();
-
-            // Create a bind group to use the texture as sampled and writeonly bindings
-            wgpu::BindGroupLayout bgl = utils::MakeBindGroupLayout(
-                device,
-                {{0, wgpu::ShaderStage::Compute, wgpu::StorageTextureAccess::WriteOnly, kFormat},
-                 {1, wgpu::ShaderStage::Compute, wgpu::StorageTextureAccess::WriteOnly, kFormat}});
-            wgpu::BindGroup bg = utils::MakeBindGroup(device, bgl, {{0, view}, {1, view}});
-
-            // Create a no-op compute pipeline
-            wgpu::ComputePipeline cp = CreateNoOpComputePipeline({bgl});
-
-            // It is valid to use the texture as multiple writeonly storage usages in a single
-            // dispatch
-            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-            wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
-            pass.SetPipeline(cp);
-            pass.SetBindGroup(0, bg);
-            pass.DispatchWorkgroups(1);
-            pass.End();
-            encoder.Finish();
-        }
-    }
-
-    // Test that a single subresource of a texture cannot be used as a render attachment more than
-    // once in the same pass.
-    TEST_F(ResourceUsageTrackingTest, TextureWithMultipleRenderAttachmentUsage) {
-        // Create a texture with two array layers
-        wgpu::TextureDescriptor descriptor;
-        descriptor.dimension = wgpu::TextureDimension::e2D;
-        descriptor.size = {1, 1, 2};
-        descriptor.usage = wgpu::TextureUsage::RenderAttachment;
-        descriptor.format = kFormat;
-
-        wgpu::Texture texture = device.CreateTexture(&descriptor);
-
-        wgpu::TextureViewDescriptor viewDesc = {};
-        viewDesc.arrayLayerCount = 1;
-
-        wgpu::TextureView viewLayer0 = texture.CreateView(&viewDesc);
-
-        viewDesc.baseArrayLayer = 1;
-        wgpu::TextureView viewLayer1 = texture.CreateView(&viewDesc);
-
-        // Control: It is valid to use layer0 as a render target for one attachment, and
-        // layer1 as the second attachment in the same pass
-        {
-            utils::ComboRenderPassDescriptor renderPass({viewLayer0, viewLayer1});
-
-            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-            wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass);
-            pass.End();
-            encoder.Finish();
-        }
-
-        // Control: It is valid to use layer0 as a render target in separate passes.
-        {
-            utils::ComboRenderPassDescriptor renderPass({viewLayer0});
-
-            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-            wgpu::RenderPassEncoder pass0 = encoder.BeginRenderPass(&renderPass);
-            pass0.End();
-            wgpu::RenderPassEncoder pass1 = encoder.BeginRenderPass(&renderPass);
-            pass1.End();
-            encoder.Finish();
-        }
-
-        // It is invalid to use layer0 as a render target for both attachments in the same pass
-        {
-            utils::ComboRenderPassDescriptor renderPass({viewLayer0, viewLayer0});
-
-            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-            wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass);
-            pass.End();
-            ASSERT_DEVICE_ERROR(encoder.Finish());
-        }
-
-        // It is invalid to use layer1 as a render target for both attachments in the same pass
-        {
-            utils::ComboRenderPassDescriptor renderPass({viewLayer1, viewLayer1});
-
-            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-            wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass);
-            pass.End();
-            ASSERT_DEVICE_ERROR(encoder.Finish());
-        }
-    }
-
-    // Test that using the same texture as both readable and writable in different passes is
-    // allowed
-    TEST_F(ResourceUsageTrackingTest, TextureWithReadAndWriteUsageInDifferentPasses) {
-        // Test render pass
-        {
-            // Create textures that will be used as both a sampled texture and a render target
-            wgpu::Texture t0 = CreateTexture(wgpu::TextureUsage::TextureBinding |
-                                             wgpu::TextureUsage::RenderAttachment);
-            wgpu::TextureView v0 = t0.CreateView();
-            wgpu::Texture t1 = CreateTexture(wgpu::TextureUsage::TextureBinding |
-                                             wgpu::TextureUsage::RenderAttachment);
-            wgpu::TextureView v1 = t1.CreateView();
-
-            // Create bind groups to use the texture as sampled
-            wgpu::BindGroupLayout bgl = utils::MakeBindGroupLayout(
-                device, {{0, wgpu::ShaderStage::Vertex, wgpu::TextureSampleType::Float}});
-            wgpu::BindGroup bg0 = utils::MakeBindGroup(device, bgl, {{0, v0}});
-            wgpu::BindGroup bg1 = utils::MakeBindGroup(device, bgl, {{0, v1}});
-
-            // Create render passes that will use the textures as render attachments
-            utils::ComboRenderPassDescriptor renderPass0({v1});
-            utils::ComboRenderPassDescriptor renderPass1({v0});
-
-            // Use the textures as both sampled and render attachments in different passes
-            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-
-            wgpu::RenderPassEncoder pass0 = encoder.BeginRenderPass(&renderPass0);
-            pass0.SetBindGroup(0, bg0);
-            pass0.End();
-
-            wgpu::RenderPassEncoder pass1 = encoder.BeginRenderPass(&renderPass1);
-            pass1.SetBindGroup(0, bg1);
-            pass1.End();
-
-            encoder.Finish();
-        }
-
-        // Test compute pass
-        {
-            // Create a texture that will be used storage texture
-            wgpu::Texture texture = CreateTexture(wgpu::TextureUsage::TextureBinding |
-                                                  wgpu::TextureUsage::StorageBinding);
-            wgpu::TextureView view = texture.CreateView();
-
-            // Create bind groups to use the texture as sampled and writeonly bindings
-            wgpu::BindGroupLayout readBGL = utils::MakeBindGroupLayout(
-                device, {{0, wgpu::ShaderStage::Compute, wgpu::TextureSampleType::Float}});
-            wgpu::BindGroupLayout writeBGL = utils::MakeBindGroupLayout(
-                device,
-                {{0, wgpu::ShaderStage::Compute, wgpu::StorageTextureAccess::WriteOnly, kFormat}});
-            wgpu::BindGroup readBG = utils::MakeBindGroup(device, readBGL, {{0, view}});
-            wgpu::BindGroup writeBG = utils::MakeBindGroup(device, writeBGL, {{0, view}});
-
-            // Use the textures as both sampled and writeonly storages in different passes
-            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-
-            wgpu::ComputePassEncoder pass0 = encoder.BeginComputePass();
-            pass0.SetBindGroup(0, readBG);
-            pass0.End();
-
-            wgpu::ComputePassEncoder pass1 = encoder.BeginComputePass();
-            pass1.SetBindGroup(0, writeBG);
-            pass1.End();
-
-            encoder.Finish();
-        }
-
-        // Test compute pass and render pass mixed together with resource dependency
-        {
-            // Create a texture that will be used a storage texture
-            wgpu::Texture texture = CreateTexture(wgpu::TextureUsage::TextureBinding |
-                                                  wgpu::TextureUsage::StorageBinding);
-            wgpu::TextureView view = texture.CreateView();
-
-            // Create bind groups to use the texture as sampled and writeonly bindings
-            wgpu::BindGroupLayout writeBGL = utils::MakeBindGroupLayout(
-                device,
-                {{0, wgpu::ShaderStage::Compute, wgpu::StorageTextureAccess::WriteOnly, kFormat}});
-            wgpu::BindGroupLayout readBGL = utils::MakeBindGroupLayout(
-                device, {{0, wgpu::ShaderStage::Fragment, wgpu::TextureSampleType::Float}});
-            wgpu::BindGroup writeBG = utils::MakeBindGroup(device, writeBGL, {{0, view}});
-            wgpu::BindGroup readBG = utils::MakeBindGroup(device, readBGL, {{0, view}});
-
-            // Use the texture as writeonly and sampled storage in compute pass and render
-            // pass respectively
-            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-
-            wgpu::ComputePassEncoder pass0 = encoder.BeginComputePass();
-            pass0.SetBindGroup(0, writeBG);
-            pass0.End();
-
-            PlaceholderRenderPass PlaceholderRenderPass(device);
-            wgpu::RenderPassEncoder pass1 = encoder.BeginRenderPass(&PlaceholderRenderPass);
-            pass1.SetBindGroup(0, readBG);
-            pass1.End();
-
-            encoder.Finish();
-        }
-    }
-
-    // Test that it is invalid to use the same texture as both readable and writable in different
-    // draws in a single render pass. But it is valid in different dispatches in a single compute
-    // pass.
-    TEST_F(ResourceUsageTrackingTest, TextureWithReadAndWriteUsageOnDifferentDrawsOrDispatches) {
-        // Create a texture that will be used both as a sampled texture and a storage texture
-        wgpu::Texture texture =
-            CreateTexture(wgpu::TextureUsage::TextureBinding | wgpu::TextureUsage::StorageBinding);
-        wgpu::TextureView view = texture.CreateView();
-
-        // Test render pass
-        {
-            // Create bind groups to use the texture as sampled and writeonly storage bindings
-            wgpu::BindGroupLayout sampledBGL = utils::MakeBindGroupLayout(
-                device, {{0, wgpu::ShaderStage::Fragment, wgpu::TextureSampleType::Float}});
-            wgpu::BindGroupLayout writeBGL = utils::MakeBindGroupLayout(
-                device,
-                {{0, wgpu::ShaderStage::Fragment, wgpu::StorageTextureAccess::WriteOnly, kFormat}});
-            wgpu::BindGroup sampledBG = utils::MakeBindGroup(device, sampledBGL, {{0, view}});
-            wgpu::BindGroup writeBG = utils::MakeBindGroup(device, writeBGL, {{0, view}});
-
-            // Create a no-op render pipeline.
-            wgpu::RenderPipeline rp = CreateNoOpRenderPipeline();
-
-            // It is not allowed to use the same texture as both readable and writable in different
-            // draws within the same render pass.
-            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-            PlaceholderRenderPass PlaceholderRenderPass(device);
-            wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&PlaceholderRenderPass);
-            pass.SetPipeline(rp);
-
-            pass.SetBindGroup(0, sampledBG);
-            pass.Draw(3);
-
-            pass.SetBindGroup(0, writeBG);
-            pass.Draw(3);
-
-            pass.End();
-            ASSERT_DEVICE_ERROR(encoder.Finish());
-        }
-
-        // Test compute pass
-        {
-            // Create bind groups to use the texture as sampled and writeonly storage bindings
-            wgpu::BindGroupLayout readBGL = utils::MakeBindGroupLayout(
-                device, {{0, wgpu::ShaderStage::Compute, wgpu::TextureSampleType::Float}});
-            wgpu::BindGroupLayout writeBGL = utils::MakeBindGroupLayout(
-                device,
-                {{0, wgpu::ShaderStage::Compute, wgpu::StorageTextureAccess::WriteOnly, kFormat}});
-            wgpu::BindGroup readBG = utils::MakeBindGroup(device, readBGL, {{0, view}});
-            wgpu::BindGroup writeBG = utils::MakeBindGroup(device, writeBGL, {{0, view}});
-
-            // Create a no-op compute pipeline.
-            wgpu::ComputePipeline readCp = CreateNoOpComputePipeline({readBGL});
-            wgpu::ComputePipeline writeCp = CreateNoOpComputePipeline({writeBGL});
-
-            // It is valid to use the same texture as both readable and writable in different
-            // dispatches within the same compute pass.
-            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-            wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
-
-            pass.SetPipeline(readCp);
-            pass.SetBindGroup(0, readBG);
-            pass.DispatchWorkgroups(1);
-
-            pass.SetPipeline(writeCp);
-            pass.SetBindGroup(0, writeBG);
-            pass.DispatchWorkgroups(1);
-
-            pass.End();
-            encoder.Finish();
-        }
-    }
-
-    // Test that it is invalid to use the same texture as both readable and writable in a single
-    // draw or dispatch.
-    TEST_F(ResourceUsageTrackingTest, TextureWithReadAndWriteUsageInSingleDrawOrDispatch) {
-        // Create a texture that will be used both as a sampled texture and a storage texture
-        wgpu::Texture texture =
-            CreateTexture(wgpu::TextureUsage::TextureBinding | wgpu::TextureUsage::StorageBinding);
-        wgpu::TextureView view = texture.CreateView();
-
-        // Test render pass
-        {
-            // Create the bind group to use the texture as sampled and writeonly storage bindings
-            wgpu::BindGroupLayout sampledBGL = utils::MakeBindGroupLayout(
-                device, {{0, wgpu::ShaderStage::Fragment, wgpu::TextureSampleType::Float}});
-            wgpu::BindGroupLayout writeBGL = utils::MakeBindGroupLayout(
-                device,
-                {{0, wgpu::ShaderStage::Fragment, wgpu::StorageTextureAccess::WriteOnly, kFormat}});
-            wgpu::BindGroup sampledBG = utils::MakeBindGroup(device, sampledBGL, {{0, view}});
-            wgpu::BindGroup writeBG = utils::MakeBindGroup(device, writeBGL, {{0, view}});
-
-            // Create a no-op render pipeline.
-            wgpu::RenderPipeline rp = CreateNoOpRenderPipeline();
-
-            // It is invalid to use the same texture as both readable and writable usages in a
-            // single draw
-            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-            PlaceholderRenderPass PlaceholderRenderPass(device);
-            wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&PlaceholderRenderPass);
-            pass.SetPipeline(rp);
-
-            pass.SetBindGroup(0, sampledBG);
-            pass.SetBindGroup(1, writeBG);
-            pass.Draw(3);
-
-            pass.End();
-            ASSERT_DEVICE_ERROR(encoder.Finish());
-        }
-
-        // Test compute pass
-        {
-            // Create the bind group to use the texture as sampled and writeonly storage bindings
-            wgpu::BindGroupLayout readBGL = utils::MakeBindGroupLayout(
-                device, {{0, wgpu::ShaderStage::Compute, wgpu::TextureSampleType::Float}});
-            wgpu::BindGroupLayout writeBGL = utils::MakeBindGroupLayout(
-                device,
-                {{0, wgpu::ShaderStage::Compute, wgpu::StorageTextureAccess::WriteOnly, kFormat}});
-            wgpu::BindGroup readBG = utils::MakeBindGroup(device, readBGL, {{0, view}});
-            wgpu::BindGroup writeBG = utils::MakeBindGroup(device, writeBGL, {{0, view}});
-
-            // Create a no-op compute pipeline.
-            wgpu::ComputePipeline cp = CreateNoOpComputePipeline({readBGL, writeBGL});
-
-            // It is invalid to use the same texture as both readable and writable usages in a
-            // single dispatch
-            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-            wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
-            pass.SetPipeline(cp);
-
-            pass.SetBindGroup(0, readBG);
-            pass.SetBindGroup(1, writeBG);
-            pass.DispatchWorkgroups(1);
-
-            pass.End();
-            ASSERT_DEVICE_ERROR(encoder.Finish());
-        }
-    }
-
-    // Test that using a single texture as copy src/dst and writable/readable usage in pass is
-    // allowed.
-    TEST_F(ResourceUsageTrackingTest, TextureCopyAndTextureUsageInPass) {
-        // Create textures that will be used as both a sampled texture and a render target
-        wgpu::Texture texture0 = CreateTexture(wgpu::TextureUsage::CopySrc);
-        wgpu::Texture texture1 =
-            CreateTexture(wgpu::TextureUsage::CopyDst | wgpu::TextureUsage::TextureBinding |
-                          wgpu::TextureUsage::RenderAttachment);
-        wgpu::TextureView view0 = texture0.CreateView();
-        wgpu::TextureView view1 = texture1.CreateView();
-
-        wgpu::ImageCopyTexture srcView = utils::CreateImageCopyTexture(texture0, 0, {0, 0, 0});
-        wgpu::ImageCopyTexture dstView = utils::CreateImageCopyTexture(texture1, 0, {0, 0, 0});
-        wgpu::Extent3D copySize = {1, 1, 1};
-
-        // Use the texture as both copy dst and render attachment in render pass
-        {
-            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-            encoder.CopyTextureToTexture(&srcView, &dstView, &copySize);
-            utils::ComboRenderPassDescriptor renderPass({view1});
-            wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass);
-            pass.End();
-            encoder.Finish();
-        }
-
-        // Use the texture as both copy dst and readable usage in compute pass
-        {
-            // Create the bind group to use the texture as sampled
-            wgpu::BindGroupLayout bgl = utils::MakeBindGroupLayout(
-                device, {{0, wgpu::ShaderStage::Compute, wgpu::TextureSampleType::Float}});
-            wgpu::BindGroup bg = utils::MakeBindGroup(device, bgl, {{0, view1}});
-
-            wgpu::ComputePipeline cp = CreateNoOpComputePipeline({bgl});
-
-            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-            encoder.CopyTextureToTexture(&srcView, &dstView, &copySize);
-            wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
-            pass.SetBindGroup(0, bg);
-            pass.SetPipeline(cp);
-            pass.DispatchWorkgroups(1);
-            pass.End();
-            encoder.Finish();
-        }
-    }
-
-    // Test that all consecutive SetBindGroup()s take effect even though some bind groups are not
-    // used because they are overwritten by a consecutive call.
-    TEST_F(ResourceUsageTrackingTest, TextureWithMultipleSetBindGroupsOnSameIndex) {
-        // Test render pass
-        {
-            // Create textures that will be used as both a sampled texture and a render target
-            wgpu::Texture texture0 = CreateTexture(wgpu::TextureUsage::TextureBinding |
-                                                   wgpu::TextureUsage::RenderAttachment);
-            wgpu::TextureView view0 = texture0.CreateView();
-            wgpu::Texture texture1 = CreateTexture(wgpu::TextureUsage::TextureBinding |
-                                                   wgpu::TextureUsage::RenderAttachment);
-            wgpu::TextureView view1 = texture1.CreateView();
-
-            // Create the bind group to use the texture as sampled
-            wgpu::BindGroupLayout bgl = utils::MakeBindGroupLayout(
-                device, {{0, wgpu::ShaderStage::Vertex, wgpu::TextureSampleType::Float}});
-            wgpu::BindGroup bg0 = utils::MakeBindGroup(device, bgl, {{0, view0}});
-            wgpu::BindGroup bg1 = utils::MakeBindGroup(device, bgl, {{0, view1}});
-
-            // Create the render pass that will use the texture as an render attachment
-            utils::ComboRenderPassDescriptor renderPass({view0});
-
-            // Set bind group on the same index twice. The second one overwrites the first one.
-            // No texture is used as both sampled and render attachment in the same pass. But the
-            // overwritten texture still take effect during resource tracking.
-            {
-                wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-                wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass);
-                pass.SetBindGroup(0, bg0);
-                pass.SetBindGroup(0, bg1);
-                pass.End();
-                ASSERT_DEVICE_ERROR(encoder.Finish());
-            }
-
-            // Set bind group on the same index twice. The second one overwrites the first one.
-            // texture0 is used as both sampled and render attachment in the same pass
-            {
-                wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-                wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass);
-                pass.SetBindGroup(0, bg1);
-                pass.SetBindGroup(0, bg0);
-                pass.End();
-                ASSERT_DEVICE_ERROR(encoder.Finish());
-            }
-        }
-
-        // Test compute pass
-        {
-            // Create a texture that will be used both as storage texture
-            wgpu::Texture texture0 = CreateTexture(wgpu::TextureUsage::TextureBinding |
-                                                   wgpu::TextureUsage::StorageBinding);
-            wgpu::TextureView view0 = texture0.CreateView();
-            wgpu::Texture texture1 = CreateTexture(wgpu::TextureUsage::TextureBinding);
-            wgpu::TextureView view1 = texture1.CreateView();
-
-            // Create the bind group to use the texture as sampled and writeonly bindings
-            wgpu::BindGroupLayout writeBGL = utils::MakeBindGroupLayout(
-                device,
-                {{0, wgpu::ShaderStage::Compute, wgpu::StorageTextureAccess::WriteOnly, kFormat}});
-
-            wgpu::BindGroupLayout readBGL = utils::MakeBindGroupLayout(
-                device, {{0, wgpu::ShaderStage::Compute, wgpu::TextureSampleType::Float}});
-
-            wgpu::BindGroup writeBG0 = utils::MakeBindGroup(device, writeBGL, {{0, view0}});
-            wgpu::BindGroup readBG0 = utils::MakeBindGroup(device, readBGL, {{0, view0}});
-            wgpu::BindGroup readBG1 = utils::MakeBindGroup(device, readBGL, {{0, view1}});
-
-            // Create a no-op compute pipeline.
-            wgpu::ComputePipeline cp = CreateNoOpComputePipeline({writeBGL, readBGL});
-
-            // Set bind group on the same index twice. The second one overwrites the first one.
-            // No texture is used as both sampled and writeonly storage in the same dispatch so
-            // there are no errors.
-            {
-                wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-                wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
-                pass.SetBindGroup(0, writeBG0);
-                pass.SetBindGroup(1, readBG0);
-                pass.SetBindGroup(1, readBG1);
-                pass.SetPipeline(cp);
-                pass.DispatchWorkgroups(1);
-                pass.End();
-                encoder.Finish();
-            }
-
-            // Set bind group on the same index twice. The second one overwrites the first one.
-            // texture0 is used as both writeonly and sampled storage in the same dispatch, which
-            // is an error.
-            {
-                wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-                wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
-                pass.SetBindGroup(0, writeBG0);
-                pass.SetBindGroup(1, readBG1);
-                pass.SetBindGroup(1, readBG0);
-                pass.SetPipeline(cp);
-                pass.DispatchWorkgroups(1);
-                pass.End();
-                ASSERT_DEVICE_ERROR(encoder.Finish());
-            }
-        }
-    }
-
-    // Test that it is invalid to have resource usage conflicts even when all bindings are not
-    // visible to the programmable pass where it is used.
-    TEST_F(ResourceUsageTrackingTest, TextureUsageConflictBetweenInvisibleStagesInBindGroup) {
-        // Create texture and texture view
-        wgpu::Texture texture =
-            CreateTexture(wgpu::TextureUsage::TextureBinding | wgpu::TextureUsage::StorageBinding);
-        wgpu::TextureView view = texture.CreateView();
-
-        // Test render pass for bind group. The conflict of sampled storage and writeonly storage
-        // usage doesn't reside in render related stages at all
-        {
-            // Create a bind group whose bindings are not visible in render pass
-            wgpu::BindGroupLayout bgl = utils::MakeBindGroupLayout(
-                device,
-                {{0, wgpu::ShaderStage::Compute, wgpu::TextureSampleType::Float},
-                 {1, wgpu::ShaderStage::None, wgpu::StorageTextureAccess::WriteOnly, kFormat}});
-            wgpu::BindGroup bg = utils::MakeBindGroup(device, bgl, {{0, view}, {1, view}});
-
-            // These two bindings are invisible in render pass. But we still track these bindings.
-            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-            PlaceholderRenderPass PlaceholderRenderPass(device);
-            wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&PlaceholderRenderPass);
-            pass.SetBindGroup(0, bg);
-            pass.End();
-            ASSERT_DEVICE_ERROR(encoder.Finish());
-        }
-
-        // Test compute pass for bind group. The conflict of sampled storage and writeonly storage
-        // usage doesn't reside in compute related stage at all
-        {
-            // Create a bind group whose bindings are not visible in compute pass
-            wgpu::BindGroupLayout bgl = utils::MakeBindGroupLayout(
-                device,
-                {{0, wgpu::ShaderStage::Fragment, wgpu::TextureSampleType::Float},
-                 {1, wgpu::ShaderStage::None, wgpu::StorageTextureAccess::WriteOnly, kFormat}});
-            wgpu::BindGroup bg = utils::MakeBindGroup(device, bgl, {{0, view}, {1, view}});
-
-            // Create a no-op compute pipeline.
-            wgpu::ComputePipeline cp = CreateNoOpComputePipeline({bgl});
-
-            // These two bindings are invisible in compute pass. But we still track these bindings.
-            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-            wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
-            pass.SetPipeline(cp);
-            pass.SetBindGroup(0, bg);
-            pass.DispatchWorkgroups(1);
-            pass.End();
-            ASSERT_DEVICE_ERROR(encoder.Finish());
-        }
-    }
-
-    // Test that it is invalid to have resource usage conflicts even when one of the bindings is not
-    // visible to the programmable pass where it is used.
-    TEST_F(ResourceUsageTrackingTest, TextureUsageConflictWithInvisibleStageInBindGroup) {
-        // Create texture and texture view
-        wgpu::Texture texture =
-            CreateTexture(wgpu::TextureUsage::TextureBinding | wgpu::TextureUsage::StorageBinding |
-                          wgpu::TextureUsage::RenderAttachment);
-        wgpu::TextureView view = texture.CreateView();
-
-        // Test render pass
-        {
-            // Create the render pass that will use the texture as an render attachment
             utils::ComboRenderPassDescriptor renderPass({view});
 
-            // Create a bind group which use the texture as sampled storage in compute stage
-            wgpu::BindGroupLayout bgl = utils::MakeBindGroupLayout(
-                device, {{0, wgpu::ShaderStage::Compute, wgpu::TextureSampleType::Float}});
-            wgpu::BindGroup bg = utils::MakeBindGroup(device, bgl, {{0, view}});
-
-            // Texture usage in compute stage in bind group conflicts with render target. And
-            // binding for compute stage is not visible in render pass. But we still track this
-            // binding.
             wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
             wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass);
             pass.SetBindGroup(0, bg);
@@ -1532,162 +949,733 @@
             ASSERT_DEVICE_ERROR(encoder.Finish());
         }
 
-        // Test compute pass
+        // It is valid to use multiple writeonly storage usages on the same texture in render
+        // pass
         {
-            // Create a bind group which contains both fragment and compute stages
-            wgpu::BindGroupLayout bgl = utils::MakeBindGroupLayout(
-                device,
-                {{0, wgpu::ShaderStage::Fragment, wgpu::TextureSampleType::Float},
-                 {1, wgpu::ShaderStage::Compute, wgpu::StorageTextureAccess::WriteOnly, kFormat}});
-            wgpu::BindGroup bg = utils::MakeBindGroup(device, bgl, {{0, view}, {1, view}});
+            wgpu::BindGroup bg1 = utils::MakeBindGroup(device, bgl, {{0, view}});
 
-            // Create a no-op compute pipeline.
-            wgpu::ComputePipeline cp = CreateNoOpComputePipeline({bgl});
-
-            // Texture usage in compute stage conflicts with texture usage in fragment stage. And
-            // binding for fragment stage is not visible in compute pass. But we still track this
-            // invisible binding.
             wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-            wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
-            pass.SetPipeline(cp);
+            PlaceholderRenderPass PlaceholderRenderPass(device);
+            wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&PlaceholderRenderPass);
             pass.SetBindGroup(0, bg);
-            pass.DispatchWorkgroups(1);
+            pass.SetBindGroup(1, bg1);
             pass.End();
-            ASSERT_DEVICE_ERROR(encoder.Finish());
+            encoder.Finish();
         }
     }
 
-    // Test that it is invalid to have resource usage conflicts even when one of the bindings is not
-    // used in the pipeline.
-    TEST_F(ResourceUsageTrackingTest, TextureUsageConflictWithUnusedPipelineBindings) {
-        // Create texture and texture view
+    // Test compute pass
+    {
+        // Create a texture
+        wgpu::Texture texture = CreateTexture(wgpu::TextureUsage::StorageBinding);
+        wgpu::TextureView view = texture.CreateView();
+
+        // Create a bind group to use the texture as sampled and writeonly bindings
+        wgpu::BindGroupLayout bgl = utils::MakeBindGroupLayout(
+            device,
+            {{0, wgpu::ShaderStage::Compute, wgpu::StorageTextureAccess::WriteOnly, kFormat},
+             {1, wgpu::ShaderStage::Compute, wgpu::StorageTextureAccess::WriteOnly, kFormat}});
+        wgpu::BindGroup bg = utils::MakeBindGroup(device, bgl, {{0, view}, {1, view}});
+
+        // Create a no-op compute pipeline
+        wgpu::ComputePipeline cp = CreateNoOpComputePipeline({bgl});
+
+        // It is valid to use the texture as multiple writeonly storage usages in a single
+        // dispatch
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
+        pass.SetPipeline(cp);
+        pass.SetBindGroup(0, bg);
+        pass.DispatchWorkgroups(1);
+        pass.End();
+        encoder.Finish();
+    }
+}
+
+// Test that a single subresource of a texture cannot be used as a render attachment more than
+// once in the same pass.
+TEST_F(ResourceUsageTrackingTest, TextureWithMultipleRenderAttachmentUsage) {
+    // Create a texture with two array layers
+    wgpu::TextureDescriptor descriptor;
+    descriptor.dimension = wgpu::TextureDimension::e2D;
+    descriptor.size = {1, 1, 2};
+    descriptor.usage = wgpu::TextureUsage::RenderAttachment;
+    descriptor.format = kFormat;
+
+    wgpu::Texture texture = device.CreateTexture(&descriptor);
+
+    wgpu::TextureViewDescriptor viewDesc = {};
+    viewDesc.arrayLayerCount = 1;
+
+    wgpu::TextureView viewLayer0 = texture.CreateView(&viewDesc);
+
+    viewDesc.baseArrayLayer = 1;
+    wgpu::TextureView viewLayer1 = texture.CreateView(&viewDesc);
+
+    // Control: It is valid to use layer0 as a render target for one attachment, and
+    // layer1 as the second attachment in the same pass
+    {
+        utils::ComboRenderPassDescriptor renderPass({viewLayer0, viewLayer1});
+
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass);
+        pass.End();
+        encoder.Finish();
+    }
+
+    // Control: It is valid to use layer0 as a render target in separate passes.
+    {
+        utils::ComboRenderPassDescriptor renderPass({viewLayer0});
+
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::RenderPassEncoder pass0 = encoder.BeginRenderPass(&renderPass);
+        pass0.End();
+        wgpu::RenderPassEncoder pass1 = encoder.BeginRenderPass(&renderPass);
+        pass1.End();
+        encoder.Finish();
+    }
+
+    // It is invalid to use layer0 as a render target for both attachments in the same pass
+    {
+        utils::ComboRenderPassDescriptor renderPass({viewLayer0, viewLayer0});
+
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass);
+        pass.End();
+        ASSERT_DEVICE_ERROR(encoder.Finish());
+    }
+
+    // It is invalid to use layer1 as a render target for both attachments in the same pass
+    {
+        utils::ComboRenderPassDescriptor renderPass({viewLayer1, viewLayer1});
+
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass);
+        pass.End();
+        ASSERT_DEVICE_ERROR(encoder.Finish());
+    }
+}
+
+// Test that using the same texture as both readable and writable in different passes is
+// allowed
+TEST_F(ResourceUsageTrackingTest, TextureWithReadAndWriteUsageInDifferentPasses) {
+    // Test render pass
+    {
+        // Create textures that will be used as both a sampled texture and a render target
+        wgpu::Texture t0 = CreateTexture(wgpu::TextureUsage::TextureBinding |
+                                         wgpu::TextureUsage::RenderAttachment);
+        wgpu::TextureView v0 = t0.CreateView();
+        wgpu::Texture t1 = CreateTexture(wgpu::TextureUsage::TextureBinding |
+                                         wgpu::TextureUsage::RenderAttachment);
+        wgpu::TextureView v1 = t1.CreateView();
+
+        // Create bind groups to use the texture as sampled
+        wgpu::BindGroupLayout bgl = utils::MakeBindGroupLayout(
+            device, {{0, wgpu::ShaderStage::Vertex, wgpu::TextureSampleType::Float}});
+        wgpu::BindGroup bg0 = utils::MakeBindGroup(device, bgl, {{0, v0}});
+        wgpu::BindGroup bg1 = utils::MakeBindGroup(device, bgl, {{0, v1}});
+
+        // Create render passes that will use the textures as render attachments
+        utils::ComboRenderPassDescriptor renderPass0({v1});
+        utils::ComboRenderPassDescriptor renderPass1({v0});
+
+        // Use the textures as both sampled and render attachments in different passes
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+
+        wgpu::RenderPassEncoder pass0 = encoder.BeginRenderPass(&renderPass0);
+        pass0.SetBindGroup(0, bg0);
+        pass0.End();
+
+        wgpu::RenderPassEncoder pass1 = encoder.BeginRenderPass(&renderPass1);
+        pass1.SetBindGroup(0, bg1);
+        pass1.End();
+
+        encoder.Finish();
+    }
+
+    // Test compute pass
+    {
+        // Create a texture that will be used storage texture
         wgpu::Texture texture =
             CreateTexture(wgpu::TextureUsage::TextureBinding | wgpu::TextureUsage::StorageBinding);
         wgpu::TextureView view = texture.CreateView();
 
-        // Create bind groups.
+        // Create bind groups to use the texture as sampled and writeonly bindings
         wgpu::BindGroupLayout readBGL = utils::MakeBindGroupLayout(
-            device, {{0, wgpu::ShaderStage::Fragment | wgpu::ShaderStage::Compute,
-                      wgpu::TextureSampleType::Float}});
+            device, {{0, wgpu::ShaderStage::Compute, wgpu::TextureSampleType::Float}});
         wgpu::BindGroupLayout writeBGL = utils::MakeBindGroupLayout(
-            device, {{0, wgpu::ShaderStage::Fragment | wgpu::ShaderStage::Compute,
-                      wgpu::StorageTextureAccess::WriteOnly, kFormat}});
+            device,
+            {{0, wgpu::ShaderStage::Compute, wgpu::StorageTextureAccess::WriteOnly, kFormat}});
         wgpu::BindGroup readBG = utils::MakeBindGroup(device, readBGL, {{0, view}});
         wgpu::BindGroup writeBG = utils::MakeBindGroup(device, writeBGL, {{0, view}});
 
-        // Test render pass
+        // Use the textures as both sampled and writeonly storages in different passes
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+
+        wgpu::ComputePassEncoder pass0 = encoder.BeginComputePass();
+        pass0.SetBindGroup(0, readBG);
+        pass0.End();
+
+        wgpu::ComputePassEncoder pass1 = encoder.BeginComputePass();
+        pass1.SetBindGroup(0, writeBG);
+        pass1.End();
+
+        encoder.Finish();
+    }
+
+    // Test compute pass and render pass mixed together with resource dependency
+    {
+        // Create a texture that will be used a storage texture
+        wgpu::Texture texture =
+            CreateTexture(wgpu::TextureUsage::TextureBinding | wgpu::TextureUsage::StorageBinding);
+        wgpu::TextureView view = texture.CreateView();
+
+        // Create bind groups to use the texture as sampled and writeonly bindings
+        wgpu::BindGroupLayout writeBGL = utils::MakeBindGroupLayout(
+            device,
+            {{0, wgpu::ShaderStage::Compute, wgpu::StorageTextureAccess::WriteOnly, kFormat}});
+        wgpu::BindGroupLayout readBGL = utils::MakeBindGroupLayout(
+            device, {{0, wgpu::ShaderStage::Fragment, wgpu::TextureSampleType::Float}});
+        wgpu::BindGroup writeBG = utils::MakeBindGroup(device, writeBGL, {{0, view}});
+        wgpu::BindGroup readBG = utils::MakeBindGroup(device, readBGL, {{0, view}});
+
+        // Use the texture as writeonly and sampled storage in compute pass and render
+        // pass respectively
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+
+        wgpu::ComputePassEncoder pass0 = encoder.BeginComputePass();
+        pass0.SetBindGroup(0, writeBG);
+        pass0.End();
+
+        PlaceholderRenderPass PlaceholderRenderPass(device);
+        wgpu::RenderPassEncoder pass1 = encoder.BeginRenderPass(&PlaceholderRenderPass);
+        pass1.SetBindGroup(0, readBG);
+        pass1.End();
+
+        encoder.Finish();
+    }
+}
+
+// Test that it is invalid to use the same texture as both readable and writable in different
+// draws in a single render pass. But it is valid in different dispatches in a single compute
+// pass.
+TEST_F(ResourceUsageTrackingTest, TextureWithReadAndWriteUsageOnDifferentDrawsOrDispatches) {
+    // Create a texture that will be used both as a sampled texture and a storage texture
+    wgpu::Texture texture =
+        CreateTexture(wgpu::TextureUsage::TextureBinding | wgpu::TextureUsage::StorageBinding);
+    wgpu::TextureView view = texture.CreateView();
+
+    // Test render pass
+    {
+        // Create bind groups to use the texture as sampled and writeonly storage bindings
+        wgpu::BindGroupLayout sampledBGL = utils::MakeBindGroupLayout(
+            device, {{0, wgpu::ShaderStage::Fragment, wgpu::TextureSampleType::Float}});
+        wgpu::BindGroupLayout writeBGL = utils::MakeBindGroupLayout(
+            device,
+            {{0, wgpu::ShaderStage::Fragment, wgpu::StorageTextureAccess::WriteOnly, kFormat}});
+        wgpu::BindGroup sampledBG = utils::MakeBindGroup(device, sampledBGL, {{0, view}});
+        wgpu::BindGroup writeBG = utils::MakeBindGroup(device, writeBGL, {{0, view}});
+
+        // Create a no-op render pipeline.
+        wgpu::RenderPipeline rp = CreateNoOpRenderPipeline();
+
+        // It is not allowed to use the same texture as both readable and writable in different
+        // draws within the same render pass.
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        PlaceholderRenderPass PlaceholderRenderPass(device);
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&PlaceholderRenderPass);
+        pass.SetPipeline(rp);
+
+        pass.SetBindGroup(0, sampledBG);
+        pass.Draw(3);
+
+        pass.SetBindGroup(0, writeBG);
+        pass.Draw(3);
+
+        pass.End();
+        ASSERT_DEVICE_ERROR(encoder.Finish());
+    }
+
+    // Test compute pass
+    {
+        // Create bind groups to use the texture as sampled and writeonly storage bindings
+        wgpu::BindGroupLayout readBGL = utils::MakeBindGroupLayout(
+            device, {{0, wgpu::ShaderStage::Compute, wgpu::TextureSampleType::Float}});
+        wgpu::BindGroupLayout writeBGL = utils::MakeBindGroupLayout(
+            device,
+            {{0, wgpu::ShaderStage::Compute, wgpu::StorageTextureAccess::WriteOnly, kFormat}});
+        wgpu::BindGroup readBG = utils::MakeBindGroup(device, readBGL, {{0, view}});
+        wgpu::BindGroup writeBG = utils::MakeBindGroup(device, writeBGL, {{0, view}});
+
+        // Create a no-op compute pipeline.
+        wgpu::ComputePipeline readCp = CreateNoOpComputePipeline({readBGL});
+        wgpu::ComputePipeline writeCp = CreateNoOpComputePipeline({writeBGL});
+
+        // It is valid to use the same texture as both readable and writable in different
+        // dispatches within the same compute pass.
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
+
+        pass.SetPipeline(readCp);
+        pass.SetBindGroup(0, readBG);
+        pass.DispatchWorkgroups(1);
+
+        pass.SetPipeline(writeCp);
+        pass.SetBindGroup(0, writeBG);
+        pass.DispatchWorkgroups(1);
+
+        pass.End();
+        encoder.Finish();
+    }
+}
+
+// Test that it is invalid to use the same texture as both readable and writable in a single
+// draw or dispatch.
+TEST_F(ResourceUsageTrackingTest, TextureWithReadAndWriteUsageInSingleDrawOrDispatch) {
+    // Create a texture that will be used both as a sampled texture and a storage texture
+    wgpu::Texture texture =
+        CreateTexture(wgpu::TextureUsage::TextureBinding | wgpu::TextureUsage::StorageBinding);
+    wgpu::TextureView view = texture.CreateView();
+
+    // Test render pass
+    {
+        // Create the bind group to use the texture as sampled and writeonly storage bindings
+        wgpu::BindGroupLayout sampledBGL = utils::MakeBindGroupLayout(
+            device, {{0, wgpu::ShaderStage::Fragment, wgpu::TextureSampleType::Float}});
+        wgpu::BindGroupLayout writeBGL = utils::MakeBindGroupLayout(
+            device,
+            {{0, wgpu::ShaderStage::Fragment, wgpu::StorageTextureAccess::WriteOnly, kFormat}});
+        wgpu::BindGroup sampledBG = utils::MakeBindGroup(device, sampledBGL, {{0, view}});
+        wgpu::BindGroup writeBG = utils::MakeBindGroup(device, writeBGL, {{0, view}});
+
+        // Create a no-op render pipeline.
+        wgpu::RenderPipeline rp = CreateNoOpRenderPipeline();
+
+        // It is invalid to use the same texture as both readable and writable usages in a
+        // single draw
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        PlaceholderRenderPass PlaceholderRenderPass(device);
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&PlaceholderRenderPass);
+        pass.SetPipeline(rp);
+
+        pass.SetBindGroup(0, sampledBG);
+        pass.SetBindGroup(1, writeBG);
+        pass.Draw(3);
+
+        pass.End();
+        ASSERT_DEVICE_ERROR(encoder.Finish());
+    }
+
+    // Test compute pass
+    {
+        // Create the bind group to use the texture as sampled and writeonly storage bindings
+        wgpu::BindGroupLayout readBGL = utils::MakeBindGroupLayout(
+            device, {{0, wgpu::ShaderStage::Compute, wgpu::TextureSampleType::Float}});
+        wgpu::BindGroupLayout writeBGL = utils::MakeBindGroupLayout(
+            device,
+            {{0, wgpu::ShaderStage::Compute, wgpu::StorageTextureAccess::WriteOnly, kFormat}});
+        wgpu::BindGroup readBG = utils::MakeBindGroup(device, readBGL, {{0, view}});
+        wgpu::BindGroup writeBG = utils::MakeBindGroup(device, writeBGL, {{0, view}});
+
+        // Create a no-op compute pipeline.
+        wgpu::ComputePipeline cp = CreateNoOpComputePipeline({readBGL, writeBGL});
+
+        // It is invalid to use the same texture as both readable and writable usages in a
+        // single dispatch
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
+        pass.SetPipeline(cp);
+
+        pass.SetBindGroup(0, readBG);
+        pass.SetBindGroup(1, writeBG);
+        pass.DispatchWorkgroups(1);
+
+        pass.End();
+        ASSERT_DEVICE_ERROR(encoder.Finish());
+    }
+}
+
+// Test that using a single texture as copy src/dst and writable/readable usage in pass is
+// allowed.
+TEST_F(ResourceUsageTrackingTest, TextureCopyAndTextureUsageInPass) {
+    // Create textures that will be used as copy src/dst, sampled texture and render attachment
+    wgpu::Texture texture0 = CreateTexture(wgpu::TextureUsage::CopySrc);
+    wgpu::Texture texture1 =
+        CreateTexture(wgpu::TextureUsage::CopyDst | wgpu::TextureUsage::TextureBinding |
+                      wgpu::TextureUsage::RenderAttachment);
+    wgpu::TextureView view0 = texture0.CreateView();
+    wgpu::TextureView view1 = texture1.CreateView();
+
+    wgpu::ImageCopyTexture srcView = utils::CreateImageCopyTexture(texture0, 0, {0, 0, 0});
+    wgpu::ImageCopyTexture dstView = utils::CreateImageCopyTexture(texture1, 0, {0, 0, 0});
+    wgpu::Extent3D copySize = {1, 1, 1};
+
+    // Use the texture as both copy dst and render attachment in render pass
+    {
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        encoder.CopyTextureToTexture(&srcView, &dstView, &copySize);
+        utils::ComboRenderPassDescriptor renderPass({view1});
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass);
+        pass.End();
+        encoder.Finish();
+    }
+
+    // Use the texture as both copy dst and readable usage in compute pass
+    {
+        // Create the bind group to use the texture as sampled
+        wgpu::BindGroupLayout bgl = utils::MakeBindGroupLayout(
+            device, {{0, wgpu::ShaderStage::Compute, wgpu::TextureSampleType::Float}});
+        wgpu::BindGroup bg = utils::MakeBindGroup(device, bgl, {{0, view1}});
+
+        wgpu::ComputePipeline cp = CreateNoOpComputePipeline({bgl});
+
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        encoder.CopyTextureToTexture(&srcView, &dstView, &copySize);
+        wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
+        pass.SetBindGroup(0, bg);
+        pass.SetPipeline(cp);
+        pass.DispatchWorkgroups(1);
+        pass.End();
+        encoder.Finish();
+    }
+}
+
+// Test that all consecutive SetBindGroup()s take effect even though some bind groups are not
+// used because they are overwritten by a consecutive call.
+TEST_F(ResourceUsageTrackingTest, TextureWithMultipleSetBindGroupsOnSameIndex) {
+    // Test render pass
+    {
+        // Create textures that will be used as both a sampled texture and a render target
+        wgpu::Texture texture0 = CreateTexture(wgpu::TextureUsage::TextureBinding |
+                                               wgpu::TextureUsage::RenderAttachment);
+        wgpu::TextureView view0 = texture0.CreateView();
+        wgpu::Texture texture1 = CreateTexture(wgpu::TextureUsage::TextureBinding |
+                                               wgpu::TextureUsage::RenderAttachment);
+        wgpu::TextureView view1 = texture1.CreateView();
+
+        // Create the bind group to use the texture as sampled
+        wgpu::BindGroupLayout bgl = utils::MakeBindGroupLayout(
+            device, {{0, wgpu::ShaderStage::Vertex, wgpu::TextureSampleType::Float}});
+        wgpu::BindGroup bg0 = utils::MakeBindGroup(device, bgl, {{0, view0}});
+        wgpu::BindGroup bg1 = utils::MakeBindGroup(device, bgl, {{0, view1}});
+
+        // Create the render pass that will use the texture as a render attachment
+        utils::ComboRenderPassDescriptor renderPass({view0});
+
+        // Set bind group on the same index twice. The second one overwrites the first one.
+        // No texture is used as both sampled and render attachment in the same pass. But the
+        // overwritten texture still takes effect during resource tracking.
         {
-            // Create a passthrough render pipeline with a sampled storage texture
-            wgpu::ShaderModule vsModule = utils::CreateShaderModule(device, R"(
+            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+            wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass);
+            pass.SetBindGroup(0, bg0);
+            pass.SetBindGroup(0, bg1);
+            pass.End();
+            ASSERT_DEVICE_ERROR(encoder.Finish());
+        }
+
+        // Set bind group on the same index twice. The second one overwrites the first one.
+        // texture0 is used as both sampled and render attachment in the same pass
+        {
+            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+            wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass);
+            pass.SetBindGroup(0, bg1);
+            pass.SetBindGroup(0, bg0);
+            pass.End();
+            ASSERT_DEVICE_ERROR(encoder.Finish());
+        }
+    }
+
+    // Test compute pass
+    {
+        // Create a texture that will be used as both a sampled texture and a storage texture
+        wgpu::Texture texture0 =
+            CreateTexture(wgpu::TextureUsage::TextureBinding | wgpu::TextureUsage::StorageBinding);
+        wgpu::TextureView view0 = texture0.CreateView();
+        wgpu::Texture texture1 = CreateTexture(wgpu::TextureUsage::TextureBinding);
+        wgpu::TextureView view1 = texture1.CreateView();
+
+        // Create the bind group to use the texture as sampled and writeonly bindings
+        wgpu::BindGroupLayout writeBGL = utils::MakeBindGroupLayout(
+            device,
+            {{0, wgpu::ShaderStage::Compute, wgpu::StorageTextureAccess::WriteOnly, kFormat}});
+
+        wgpu::BindGroupLayout readBGL = utils::MakeBindGroupLayout(
+            device, {{0, wgpu::ShaderStage::Compute, wgpu::TextureSampleType::Float}});
+
+        wgpu::BindGroup writeBG0 = utils::MakeBindGroup(device, writeBGL, {{0, view0}});
+        wgpu::BindGroup readBG0 = utils::MakeBindGroup(device, readBGL, {{0, view0}});
+        wgpu::BindGroup readBG1 = utils::MakeBindGroup(device, readBGL, {{0, view1}});
+
+        // Create a no-op compute pipeline.
+        wgpu::ComputePipeline cp = CreateNoOpComputePipeline({writeBGL, readBGL});
+
+        // Set bind group on the same index twice. The second one overwrites the first one.
+        // No texture is used as both sampled and writeonly storage in the same dispatch so
+        // there are no errors.
+        {
+            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+            wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
+            pass.SetBindGroup(0, writeBG0);
+            pass.SetBindGroup(1, readBG0);
+            pass.SetBindGroup(1, readBG1);
+            pass.SetPipeline(cp);
+            pass.DispatchWorkgroups(1);
+            pass.End();
+            encoder.Finish();
+        }
+
+        // Set bind group on the same index twice. The second one overwrites the first one.
+        // texture0 is used as both writeonly and sampled storage in the same dispatch, which
+        // is an error.
+        {
+            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+            wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
+            pass.SetBindGroup(0, writeBG0);
+            pass.SetBindGroup(1, readBG1);
+            pass.SetBindGroup(1, readBG0);
+            pass.SetPipeline(cp);
+            pass.DispatchWorkgroups(1);
+            pass.End();
+            ASSERT_DEVICE_ERROR(encoder.Finish());
+        }
+    }
+}
+
+// Test that it is invalid to have resource usage conflicts even when all bindings are not
+// visible to the programmable pass where it is used.
+TEST_F(ResourceUsageTrackingTest, TextureUsageConflictBetweenInvisibleStagesInBindGroup) {
+    // Create texture and texture view
+    wgpu::Texture texture =
+        CreateTexture(wgpu::TextureUsage::TextureBinding | wgpu::TextureUsage::StorageBinding);
+    wgpu::TextureView view = texture.CreateView();
+
+    // Test render pass for bind group. The conflict of sampled storage and writeonly storage
+    // usage doesn't reside in render related stages at all
+    {
+        // Create a bind group whose bindings are not visible in render pass
+        wgpu::BindGroupLayout bgl = utils::MakeBindGroupLayout(
+            device, {{0, wgpu::ShaderStage::Compute, wgpu::TextureSampleType::Float},
+                     {1, wgpu::ShaderStage::None, wgpu::StorageTextureAccess::WriteOnly, kFormat}});
+        wgpu::BindGroup bg = utils::MakeBindGroup(device, bgl, {{0, view}, {1, view}});
+
+        // These two bindings are invisible in render pass. But we still track these bindings.
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        PlaceholderRenderPass PlaceholderRenderPass(device);
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&PlaceholderRenderPass);
+        pass.SetBindGroup(0, bg);
+        pass.End();
+        ASSERT_DEVICE_ERROR(encoder.Finish());
+    }
+
+    // Test compute pass for bind group. The conflict of sampled storage and writeonly storage
+    // usage doesn't reside in compute related stage at all
+    {
+        // Create a bind group whose bindings are not visible in compute pass
+        wgpu::BindGroupLayout bgl = utils::MakeBindGroupLayout(
+            device, {{0, wgpu::ShaderStage::Fragment, wgpu::TextureSampleType::Float},
+                     {1, wgpu::ShaderStage::None, wgpu::StorageTextureAccess::WriteOnly, kFormat}});
+        wgpu::BindGroup bg = utils::MakeBindGroup(device, bgl, {{0, view}, {1, view}});
+
+        // Create a no-op compute pipeline.
+        wgpu::ComputePipeline cp = CreateNoOpComputePipeline({bgl});
+
+        // These two bindings are invisible in compute pass. But we still track these bindings.
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
+        pass.SetPipeline(cp);
+        pass.SetBindGroup(0, bg);
+        pass.DispatchWorkgroups(1);
+        pass.End();
+        ASSERT_DEVICE_ERROR(encoder.Finish());
+    }
+}
+
+// Test that it is invalid to have resource usage conflicts even when one of the bindings is not
+// visible to the programmable pass where it is used.
+TEST_F(ResourceUsageTrackingTest, TextureUsageConflictWithInvisibleStageInBindGroup) {
+    // Create texture and texture view
+    wgpu::Texture texture =
+        CreateTexture(wgpu::TextureUsage::TextureBinding | wgpu::TextureUsage::StorageBinding |
+                      wgpu::TextureUsage::RenderAttachment);
+    wgpu::TextureView view = texture.CreateView();
+
+    // Test render pass
+    {
+        // Create the render pass that will use the texture as a render attachment
+        utils::ComboRenderPassDescriptor renderPass({view});
+
+        // Create a bind group which use the texture as sampled storage in compute stage
+        wgpu::BindGroupLayout bgl = utils::MakeBindGroupLayout(
+            device, {{0, wgpu::ShaderStage::Compute, wgpu::TextureSampleType::Float}});
+        wgpu::BindGroup bg = utils::MakeBindGroup(device, bgl, {{0, view}});
+
+        // Texture usage in compute stage in bind group conflicts with render target. And
+        // binding for compute stage is not visible in render pass. But we still track this
+        // binding.
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass);
+        pass.SetBindGroup(0, bg);
+        pass.End();
+        ASSERT_DEVICE_ERROR(encoder.Finish());
+    }
+
+    // Test compute pass
+    {
+        // Create a bind group which contains both fragment and compute stages
+        wgpu::BindGroupLayout bgl = utils::MakeBindGroupLayout(
+            device,
+            {{0, wgpu::ShaderStage::Fragment, wgpu::TextureSampleType::Float},
+             {1, wgpu::ShaderStage::Compute, wgpu::StorageTextureAccess::WriteOnly, kFormat}});
+        wgpu::BindGroup bg = utils::MakeBindGroup(device, bgl, {{0, view}, {1, view}});
+
+        // Create a no-op compute pipeline.
+        wgpu::ComputePipeline cp = CreateNoOpComputePipeline({bgl});
+
+        // Texture usage in compute stage conflicts with texture usage in fragment stage. And
+        // binding for fragment stage is not visible in compute pass. But we still track this
+        // invisible binding.
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
+        pass.SetPipeline(cp);
+        pass.SetBindGroup(0, bg);
+        pass.DispatchWorkgroups(1);
+        pass.End();
+        ASSERT_DEVICE_ERROR(encoder.Finish());
+    }
+}
+
+// Test that it is invalid to have resource usage conflicts even when one of the bindings is not
+// used in the pipeline.
+TEST_F(ResourceUsageTrackingTest, TextureUsageConflictWithUnusedPipelineBindings) {
+    // Create texture and texture view
+    wgpu::Texture texture =
+        CreateTexture(wgpu::TextureUsage::TextureBinding | wgpu::TextureUsage::StorageBinding);
+    wgpu::TextureView view = texture.CreateView();
+
+    // Create bind groups.
+    wgpu::BindGroupLayout readBGL = utils::MakeBindGroupLayout(
+        device, {{0, wgpu::ShaderStage::Fragment | wgpu::ShaderStage::Compute,
+                  wgpu::TextureSampleType::Float}});
+    wgpu::BindGroupLayout writeBGL = utils::MakeBindGroupLayout(
+        device, {{0, wgpu::ShaderStage::Fragment | wgpu::ShaderStage::Compute,
+                  wgpu::StorageTextureAccess::WriteOnly, kFormat}});
+    wgpu::BindGroup readBG = utils::MakeBindGroup(device, readBGL, {{0, view}});
+    wgpu::BindGroup writeBG = utils::MakeBindGroup(device, writeBGL, {{0, view}});
+
+    // Test render pass
+    {
+        // Create a passthrough render pipeline with a sampled storage texture
+        wgpu::ShaderModule vsModule = utils::CreateShaderModule(device, R"(
                 @stage(vertex) fn main() -> @builtin(position) vec4<f32> {
                     return vec4<f32>();
                 })");
 
-            wgpu::ShaderModule fsModule = utils::CreateShaderModule(device, R"(
+        wgpu::ShaderModule fsModule = utils::CreateShaderModule(device, R"(
                 @group(0) @binding(0) var tex : texture_2d<f32>;
                 @stage(fragment) fn main() {
                 })");
-            utils::ComboRenderPipelineDescriptor pipelineDescriptor;
-            pipelineDescriptor.vertex.module = vsModule;
-            pipelineDescriptor.cFragment.module = fsModule;
-            pipelineDescriptor.cTargets[0].writeMask = wgpu::ColorWriteMask::None;
-            pipelineDescriptor.layout = utils::MakeBasicPipelineLayout(device, &readBGL);
-            wgpu::RenderPipeline rp = device.CreateRenderPipeline(&pipelineDescriptor);
+        utils::ComboRenderPipelineDescriptor pipelineDescriptor;
+        pipelineDescriptor.vertex.module = vsModule;
+        pipelineDescriptor.cFragment.module = fsModule;
+        pipelineDescriptor.cTargets[0].writeMask = wgpu::ColorWriteMask::None;
+        pipelineDescriptor.layout = utils::MakeBasicPipelineLayout(device, &readBGL);
+        wgpu::RenderPipeline rp = device.CreateRenderPipeline(&pipelineDescriptor);
 
-            // Texture binding in readBG conflicts with texture binding in writeBG. The binding
-            // in writeBG is not used in pipeline. But we still track this binding.
-            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-            PlaceholderRenderPass PlaceholderRenderPass(device);
-            wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&PlaceholderRenderPass);
-            pass.SetBindGroup(0, readBG);
-            pass.SetBindGroup(1, writeBG);
-            pass.SetPipeline(rp);
-            pass.Draw(3);
-            pass.End();
-            ASSERT_DEVICE_ERROR(encoder.Finish());
-        }
-
-        // Test compute pass
-        {
-            wgpu::ComputePipeline cp = CreateNoOpComputePipeline({readBGL});
-
-            // Texture binding in readBG conflicts with texture binding in writeBG. The binding
-            // in writeBG is not used in pipeline's layout so it isn't an error.
-            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-            wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
-            pass.SetBindGroup(0, readBG);
-            pass.SetBindGroup(1, writeBG);
-            pass.SetPipeline(cp);
-            pass.DispatchWorkgroups(1);
-            pass.End();
-            encoder.Finish();
-        }
+        // Texture binding in readBG conflicts with texture binding in writeBG. The binding
+        // in writeBG is not used in pipeline. But we still track this binding.
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        PlaceholderRenderPass PlaceholderRenderPass(device);
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&PlaceholderRenderPass);
+        pass.SetBindGroup(0, readBG);
+        pass.SetBindGroup(1, writeBG);
+        pass.SetPipeline(rp);
+        pass.Draw(3);
+        pass.End();
+        ASSERT_DEVICE_ERROR(encoder.Finish());
     }
 
-    // Test that using an indirect buffer is disallowed with a writable usage (like storage) but
-    // allowed with a readable usage (like readonly storage).
-    TEST_F(ResourceUsageTrackingTest, IndirectBufferWithReadOrWriteStorage) {
-        wgpu::Buffer buffer =
-            CreateBuffer(20, wgpu::BufferUsage::Indirect | wgpu::BufferUsage::Storage);
+    // Test compute pass
+    {
+        wgpu::ComputePipeline cp = CreateNoOpComputePipeline({readBGL});
 
-        wgpu::BindGroupLayout readBGL = utils::MakeBindGroupLayout(
-            device, {{0, wgpu::ShaderStage::Compute, wgpu::BufferBindingType::ReadOnlyStorage}});
-        wgpu::BindGroupLayout writeBGL = utils::MakeBindGroupLayout(
-            device, {{0, wgpu::ShaderStage::Compute, wgpu::BufferBindingType::Storage}});
-
-        wgpu::BindGroup readBG = utils::MakeBindGroup(device, readBGL, {{0, buffer}});
-        wgpu::BindGroup writeBG = utils::MakeBindGroup(device, writeBGL, {{0, buffer}});
-
-        // Test pipelines
-        wgpu::RenderPipeline rp = CreateNoOpRenderPipeline();
-        wgpu::ComputePipeline readCp = CreateNoOpComputePipeline({readBGL});
-        wgpu::ComputePipeline writeCp = CreateNoOpComputePipeline({writeBGL});
-
-        // Test that indirect + readonly is allowed in the same render pass.
-        {
-            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-            PlaceholderRenderPass PlaceholderRenderPass(device);
-            wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&PlaceholderRenderPass);
-            pass.SetPipeline(rp);
-            pass.SetBindGroup(0, readBG);
-            pass.DrawIndirect(buffer, 0);
-            pass.End();
-            encoder.Finish();
-        }
-
-        // Test that indirect + writable is disallowed in the same render pass.
-        {
-            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-            PlaceholderRenderPass PlaceholderRenderPass(device);
-            wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&PlaceholderRenderPass);
-            pass.SetPipeline(rp);
-            pass.SetBindGroup(0, writeBG);
-            pass.DrawIndirect(buffer, 0);
-            pass.End();
-            ASSERT_DEVICE_ERROR(encoder.Finish());
-        }
-
-        // Test that indirect + readonly is allowed in the same dispatch
-        {
-            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-            wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
-            pass.SetPipeline(readCp);
-            pass.SetBindGroup(0, readBG);
-            pass.DispatchWorkgroupsIndirect(buffer, 0);
-            pass.End();
-            encoder.Finish();
-        }
-
-        // Test that indirect + writable is disallowed in the same dispatch
-        {
-            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-            wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
-            pass.SetPipeline(writeCp);
-            pass.SetBindGroup(0, writeBG);
-            pass.DispatchWorkgroupsIndirect(buffer, 0);
-            pass.End();
-            ASSERT_DEVICE_ERROR(encoder.Finish());
-        }
+        // Texture binding in readBG conflicts with texture binding in writeBG. The binding
+        // in writeBG is not used in pipeline's layout so it isn't an error.
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
+        pass.SetBindGroup(0, readBG);
+        pass.SetBindGroup(1, writeBG);
+        pass.SetPipeline(cp);
+        pass.DispatchWorkgroups(1);
+        pass.End();
+        encoder.Finish();
     }
+}
+
+// Test that using an indirect buffer is disallowed with a writable usage (like storage) but
+// allowed with a readable usage (like readonly storage).
+TEST_F(ResourceUsageTrackingTest, IndirectBufferWithReadOrWriteStorage) {
+    wgpu::Buffer buffer =
+        CreateBuffer(20, wgpu::BufferUsage::Indirect | wgpu::BufferUsage::Storage);
+
+    wgpu::BindGroupLayout readBGL = utils::MakeBindGroupLayout(
+        device, {{0, wgpu::ShaderStage::Compute, wgpu::BufferBindingType::ReadOnlyStorage}});
+    wgpu::BindGroupLayout writeBGL = utils::MakeBindGroupLayout(
+        device, {{0, wgpu::ShaderStage::Compute, wgpu::BufferBindingType::Storage}});
+
+    wgpu::BindGroup readBG = utils::MakeBindGroup(device, readBGL, {{0, buffer}});
+    wgpu::BindGroup writeBG = utils::MakeBindGroup(device, writeBGL, {{0, buffer}});
+
+    // Test pipelines
+    wgpu::RenderPipeline rp = CreateNoOpRenderPipeline();
+    wgpu::ComputePipeline readCp = CreateNoOpComputePipeline({readBGL});
+    wgpu::ComputePipeline writeCp = CreateNoOpComputePipeline({writeBGL});
+
+    // Test that indirect + readonly is allowed in the same render pass.
+    {
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        PlaceholderRenderPass PlaceholderRenderPass(device);
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&PlaceholderRenderPass);
+        pass.SetPipeline(rp);
+        pass.SetBindGroup(0, readBG);
+        pass.DrawIndirect(buffer, 0);
+        pass.End();
+        encoder.Finish();
+    }
+
+    // Test that indirect + writable is disallowed in the same render pass.
+    {
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        PlaceholderRenderPass PlaceholderRenderPass(device);
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&PlaceholderRenderPass);
+        pass.SetPipeline(rp);
+        pass.SetBindGroup(0, writeBG);
+        pass.DrawIndirect(buffer, 0);
+        pass.End();
+        ASSERT_DEVICE_ERROR(encoder.Finish());
+    }
+
+    // Test that indirect + readonly is allowed in the same dispatch
+    {
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
+        pass.SetPipeline(readCp);
+        pass.SetBindGroup(0, readBG);
+        pass.DispatchWorkgroupsIndirect(buffer, 0);
+        pass.End();
+        encoder.Finish();
+    }
+
+    // Test that indirect + writable is disallowed in the same dispatch
+    {
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
+        pass.SetPipeline(writeCp);
+        pass.SetBindGroup(0, writeBG);
+        pass.DispatchWorkgroupsIndirect(buffer, 0);
+        pass.End();
+        ASSERT_DEVICE_ERROR(encoder.Finish());
+    }
+}
 
 }  // anonymous namespace
diff --git a/src/dawn/tests/unittests/validation/SamplerValidationTests.cpp b/src/dawn/tests/unittests/validation/SamplerValidationTests.cpp
index b304d11..8fa577a 100644
--- a/src/dawn/tests/unittests/validation/SamplerValidationTests.cpp
+++ b/src/dawn/tests/unittests/validation/SamplerValidationTests.cpp
@@ -20,105 +20,105 @@
 
 namespace {
 
-    class SamplerValidationTest : public ValidationTest {};
+class SamplerValidationTest : public ValidationTest {};
 
-    // Test NaN and INFINITY values are not allowed
-    TEST_F(SamplerValidationTest, InvalidLOD) {
-        { device.CreateSampler(); }
-        {
-            wgpu::SamplerDescriptor samplerDesc;
-            samplerDesc.lodMinClamp = NAN;
-            ASSERT_DEVICE_ERROR(device.CreateSampler(&samplerDesc));
-        }
-        {
-            wgpu::SamplerDescriptor samplerDesc;
-            samplerDesc.lodMaxClamp = NAN;
-            ASSERT_DEVICE_ERROR(device.CreateSampler(&samplerDesc));
-        }
-        {
-            wgpu::SamplerDescriptor samplerDesc;
-            samplerDesc.lodMaxClamp = INFINITY;
-            device.CreateSampler(&samplerDesc);
-        }
-        {
-            wgpu::SamplerDescriptor samplerDesc;
-            samplerDesc.lodMaxClamp = INFINITY;
-            samplerDesc.lodMinClamp = INFINITY;
-            device.CreateSampler(&samplerDesc);
-        }
+// Test NaN and INFINITY values are not allowed
+TEST_F(SamplerValidationTest, InvalidLOD) {
+    { device.CreateSampler(); }
+    {
+        wgpu::SamplerDescriptor samplerDesc;
+        samplerDesc.lodMinClamp = NAN;
+        ASSERT_DEVICE_ERROR(device.CreateSampler(&samplerDesc));
     }
+    {
+        wgpu::SamplerDescriptor samplerDesc;
+        samplerDesc.lodMaxClamp = NAN;
+        ASSERT_DEVICE_ERROR(device.CreateSampler(&samplerDesc));
+    }
+    {
+        wgpu::SamplerDescriptor samplerDesc;
+        samplerDesc.lodMaxClamp = INFINITY;
+        device.CreateSampler(&samplerDesc);
+    }
+    {
+        wgpu::SamplerDescriptor samplerDesc;
+        samplerDesc.lodMaxClamp = INFINITY;
+        samplerDesc.lodMinClamp = INFINITY;
+        device.CreateSampler(&samplerDesc);
+    }
+}
 
-    TEST_F(SamplerValidationTest, InvalidFilterAnisotropic) {
-        wgpu::SamplerDescriptor kValidAnisoSamplerDesc = {};
-        kValidAnisoSamplerDesc.maxAnisotropy = 2;
-        kValidAnisoSamplerDesc.minFilter = wgpu::FilterMode::Linear;
-        kValidAnisoSamplerDesc.magFilter = wgpu::FilterMode::Linear;
-        kValidAnisoSamplerDesc.mipmapFilter = wgpu::FilterMode::Linear;
-        {
-            // when maxAnisotropy > 1, min, mag, mipmap filter should be linear
-            device.CreateSampler(&kValidAnisoSamplerDesc);
-        }
-        {
-            wgpu::SamplerDescriptor samplerDesc = kValidAnisoSamplerDesc;
-            samplerDesc.maxAnisotropy = 0;
-            ASSERT_DEVICE_ERROR(device.CreateSampler(&samplerDesc));
-        }
-        {
-            wgpu::SamplerDescriptor samplerDesc = kValidAnisoSamplerDesc;
-            samplerDesc.minFilter = wgpu::FilterMode::Nearest;
-            samplerDesc.magFilter = wgpu::FilterMode::Nearest;
-            samplerDesc.mipmapFilter = wgpu::FilterMode::Nearest;
-            ASSERT_DEVICE_ERROR(device.CreateSampler(&samplerDesc));
-        }
-        {
-            wgpu::SamplerDescriptor samplerDesc = kValidAnisoSamplerDesc;
-            samplerDesc.minFilter = wgpu::FilterMode::Nearest;
-            ASSERT_DEVICE_ERROR(device.CreateSampler(&samplerDesc));
-        }
-        {
-            wgpu::SamplerDescriptor samplerDesc = kValidAnisoSamplerDesc;
-            samplerDesc.magFilter = wgpu::FilterMode::Nearest;
-            ASSERT_DEVICE_ERROR(device.CreateSampler(&samplerDesc));
-        }
-        {
-            wgpu::SamplerDescriptor samplerDesc = kValidAnisoSamplerDesc;
-            samplerDesc.mipmapFilter = wgpu::FilterMode::Nearest;
-            ASSERT_DEVICE_ERROR(device.CreateSampler(&samplerDesc));
-        }
+TEST_F(SamplerValidationTest, InvalidFilterAnisotropic) {
+    wgpu::SamplerDescriptor kValidAnisoSamplerDesc = {};
+    kValidAnisoSamplerDesc.maxAnisotropy = 2;
+    kValidAnisoSamplerDesc.minFilter = wgpu::FilterMode::Linear;
+    kValidAnisoSamplerDesc.magFilter = wgpu::FilterMode::Linear;
+    kValidAnisoSamplerDesc.mipmapFilter = wgpu::FilterMode::Linear;
+    {
+        // when maxAnisotropy > 1, min, mag, mipmap filter should be linear
+        device.CreateSampler(&kValidAnisoSamplerDesc);
     }
+    {
+        wgpu::SamplerDescriptor samplerDesc = kValidAnisoSamplerDesc;
+        samplerDesc.maxAnisotropy = 0;
+        ASSERT_DEVICE_ERROR(device.CreateSampler(&samplerDesc));
+    }
+    {
+        wgpu::SamplerDescriptor samplerDesc = kValidAnisoSamplerDesc;
+        samplerDesc.minFilter = wgpu::FilterMode::Nearest;
+        samplerDesc.magFilter = wgpu::FilterMode::Nearest;
+        samplerDesc.mipmapFilter = wgpu::FilterMode::Nearest;
+        ASSERT_DEVICE_ERROR(device.CreateSampler(&samplerDesc));
+    }
+    {
+        wgpu::SamplerDescriptor samplerDesc = kValidAnisoSamplerDesc;
+        samplerDesc.minFilter = wgpu::FilterMode::Nearest;
+        ASSERT_DEVICE_ERROR(device.CreateSampler(&samplerDesc));
+    }
+    {
+        wgpu::SamplerDescriptor samplerDesc = kValidAnisoSamplerDesc;
+        samplerDesc.magFilter = wgpu::FilterMode::Nearest;
+        ASSERT_DEVICE_ERROR(device.CreateSampler(&samplerDesc));
+    }
+    {
+        wgpu::SamplerDescriptor samplerDesc = kValidAnisoSamplerDesc;
+        samplerDesc.mipmapFilter = wgpu::FilterMode::Nearest;
+        ASSERT_DEVICE_ERROR(device.CreateSampler(&samplerDesc));
+    }
+}
 
-    TEST_F(SamplerValidationTest, ValidFilterAnisotropic) {
-        wgpu::SamplerDescriptor kValidAnisoSamplerDesc = {};
-        kValidAnisoSamplerDesc.maxAnisotropy = 2;
-        kValidAnisoSamplerDesc.minFilter = wgpu::FilterMode::Linear;
-        kValidAnisoSamplerDesc.magFilter = wgpu::FilterMode::Linear;
-        kValidAnisoSamplerDesc.mipmapFilter = wgpu::FilterMode::Linear;
-        { device.CreateSampler(); }
-        {
-            wgpu::SamplerDescriptor samplerDesc = kValidAnisoSamplerDesc;
-            samplerDesc.maxAnisotropy = 16;
-            device.CreateSampler(&samplerDesc);
-        }
-        {
-            wgpu::SamplerDescriptor samplerDesc = kValidAnisoSamplerDesc;
-            samplerDesc.maxAnisotropy = 32;
-            device.CreateSampler(&samplerDesc);
-        }
-        {
-            wgpu::SamplerDescriptor samplerDesc = kValidAnisoSamplerDesc;
-            samplerDesc.maxAnisotropy = 0x7FFF;
-            device.CreateSampler(&samplerDesc);
-        }
-        {
-            wgpu::SamplerDescriptor samplerDesc = kValidAnisoSamplerDesc;
-            samplerDesc.maxAnisotropy = 0x8000;
-            device.CreateSampler(&samplerDesc);
-        }
-        {
-            wgpu::SamplerDescriptor samplerDesc = kValidAnisoSamplerDesc;
-            samplerDesc.maxAnisotropy = 0xFFFF;
-            device.CreateSampler(&samplerDesc);
-        }
+TEST_F(SamplerValidationTest, ValidFilterAnisotropic) {
+    wgpu::SamplerDescriptor kValidAnisoSamplerDesc = {};
+    kValidAnisoSamplerDesc.maxAnisotropy = 2;
+    kValidAnisoSamplerDesc.minFilter = wgpu::FilterMode::Linear;
+    kValidAnisoSamplerDesc.magFilter = wgpu::FilterMode::Linear;
+    kValidAnisoSamplerDesc.mipmapFilter = wgpu::FilterMode::Linear;
+    { device.CreateSampler(); }
+    {
+        wgpu::SamplerDescriptor samplerDesc = kValidAnisoSamplerDesc;
+        samplerDesc.maxAnisotropy = 16;
+        device.CreateSampler(&samplerDesc);
     }
+    {
+        wgpu::SamplerDescriptor samplerDesc = kValidAnisoSamplerDesc;
+        samplerDesc.maxAnisotropy = 32;
+        device.CreateSampler(&samplerDesc);
+    }
+    {
+        wgpu::SamplerDescriptor samplerDesc = kValidAnisoSamplerDesc;
+        samplerDesc.maxAnisotropy = 0x7FFF;
+        device.CreateSampler(&samplerDesc);
+    }
+    {
+        wgpu::SamplerDescriptor samplerDesc = kValidAnisoSamplerDesc;
+        samplerDesc.maxAnisotropy = 0x8000;
+        device.CreateSampler(&samplerDesc);
+    }
+    {
+        wgpu::SamplerDescriptor samplerDesc = kValidAnisoSamplerDesc;
+        samplerDesc.maxAnisotropy = 0xFFFF;
+        device.CreateSampler(&samplerDesc);
+    }
+}
 
 }  // anonymous namespace
diff --git a/src/dawn/tests/unittests/validation/ShaderModuleValidationTests.cpp b/src/dawn/tests/unittests/validation/ShaderModuleValidationTests.cpp
index 16e443a..02b7979 100644
--- a/src/dawn/tests/unittests/validation/ShaderModuleValidationTests.cpp
+++ b/src/dawn/tests/unittests/validation/ShaderModuleValidationTests.cpp
@@ -15,9 +15,9 @@
 #include <sstream>
 #include <string>
 
-#include "dawn/tests/unittests/validation/ValidationTest.h"
 #include "dawn/common/Constants.h"
 #include "dawn/native/ShaderModule.h"
+#include "dawn/tests/unittests/validation/ValidationTest.h"
 #include "dawn/utils/ComboRenderPipelineDescriptor.h"
 #include "dawn/utils/WGPUHelpers.h"
 
diff --git a/src/dawn/tests/unittests/validation/TextureSubresourceTests.cpp b/src/dawn/tests/unittests/validation/TextureSubresourceTests.cpp
index 4986d98..78f5b66 100644
--- a/src/dawn/tests/unittests/validation/TextureSubresourceTests.cpp
+++ b/src/dawn/tests/unittests/validation/TextureSubresourceTests.cpp
@@ -18,119 +18,118 @@
 
 namespace {
 
-    class TextureSubresourceTest : public ValidationTest {
-      public:
-        static constexpr uint32_t kSize = 32u;
-        static constexpr wgpu::TextureFormat kFormat = wgpu::TextureFormat::RGBA8Unorm;
+class TextureSubresourceTest : public ValidationTest {
+  public:
+    static constexpr uint32_t kSize = 32u;
+    static constexpr wgpu::TextureFormat kFormat = wgpu::TextureFormat::RGBA8Unorm;
 
-        wgpu::Texture CreateTexture(uint32_t mipLevelCount,
-                                    uint32_t arrayLayerCount,
-                                    wgpu::TextureUsage usage) {
-            wgpu::TextureDescriptor texDesc;
-            texDesc.dimension = wgpu::TextureDimension::e2D;
-            texDesc.size = {kSize, kSize, arrayLayerCount};
-            texDesc.sampleCount = 1;
-            texDesc.mipLevelCount = mipLevelCount;
-            texDesc.usage = usage;
-            texDesc.format = kFormat;
-            return device.CreateTexture(&texDesc);
-        }
-
-        wgpu::TextureView CreateTextureView(wgpu::Texture texture,
-                                            uint32_t baseMipLevel,
-                                            uint32_t baseArrayLayer) {
-            wgpu::TextureViewDescriptor viewDesc;
-            viewDesc.format = kFormat;
-            viewDesc.baseArrayLayer = baseArrayLayer;
-            viewDesc.arrayLayerCount = 1;
-            viewDesc.baseMipLevel = baseMipLevel;
-            viewDesc.mipLevelCount = 1;
-            viewDesc.dimension = wgpu::TextureViewDimension::e2D;
-            return texture.CreateView(&viewDesc);
-        }
-
-        void TestRenderPass(const wgpu::TextureView& renderView,
-                            const wgpu::TextureView& samplerView) {
-            // Create bind group
-            wgpu::BindGroupLayout bgl = utils::MakeBindGroupLayout(
-                device, {{0, wgpu::ShaderStage::Vertex, wgpu::TextureSampleType::Float}});
-
-            utils::ComboRenderPassDescriptor renderPassDesc({renderView});
-
-            // It is valid to read from and write into different subresources of the same texture
-            {
-                wgpu::BindGroup bindGroup = utils::MakeBindGroup(device, bgl, {{0, samplerView}});
-                wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-                wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPassDesc);
-                pass.SetBindGroup(0, bindGroup);
-                pass.End();
-                encoder.Finish();
-            }
-
-            // It is not currently possible to test that it is valid to have multiple reads from a
-            // subresource while there is a single write in another subresource.
-
-            // It is invalid to read and write into the same subresources
-            {
-                wgpu::BindGroup bindGroup = utils::MakeBindGroup(device, bgl, {{0, renderView}});
-                wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-                wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPassDesc);
-                pass.SetBindGroup(0, bindGroup);
-                pass.End();
-                ASSERT_DEVICE_ERROR(encoder.Finish());
-            }
-
-            // It is valid to write into and then read from the same level of a texture in different
-            // render passes
-            {
-                wgpu::BindGroup bindGroup = utils::MakeBindGroup(device, bgl, {{0, samplerView}});
-
-                wgpu::BindGroupLayout bgl1 = utils::MakeBindGroupLayout(
-                    device, {{0, wgpu::ShaderStage::Fragment, wgpu::StorageTextureAccess::WriteOnly,
-                              kFormat}});
-                wgpu::BindGroup bindGroup1 = utils::MakeBindGroup(device, bgl1, {{0, samplerView}});
-
-                wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-                wgpu::RenderPassEncoder pass1 = encoder.BeginRenderPass(&renderPassDesc);
-                pass1.SetBindGroup(0, bindGroup1);
-                pass1.End();
-
-                wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPassDesc);
-                pass.SetBindGroup(0, bindGroup);
-                pass.End();
-
-                encoder.Finish();
-            }
-        }
-    };
-
-    // Test different mipmap levels
-    TEST_F(TextureSubresourceTest, MipmapLevelsTest) {
-        // Create texture with 2 mipmap levels and 1 layer
-        wgpu::Texture texture = CreateTexture(2, 1,
-                                              wgpu::TextureUsage::TextureBinding |
-                                                  wgpu::TextureUsage::RenderAttachment |
-                                                  wgpu::TextureUsage::StorageBinding);
-
-        // Create two views on different mipmap levels.
-        wgpu::TextureView samplerView = CreateTextureView(texture, 0, 0);
-        wgpu::TextureView renderView = CreateTextureView(texture, 1, 0);
-        TestRenderPass(samplerView, renderView);
+    wgpu::Texture CreateTexture(uint32_t mipLevelCount,
+                                uint32_t arrayLayerCount,
+                                wgpu::TextureUsage usage) {
+        wgpu::TextureDescriptor texDesc;
+        texDesc.dimension = wgpu::TextureDimension::e2D;
+        texDesc.size = {kSize, kSize, arrayLayerCount};
+        texDesc.sampleCount = 1;
+        texDesc.mipLevelCount = mipLevelCount;
+        texDesc.usage = usage;
+        texDesc.format = kFormat;
+        return device.CreateTexture(&texDesc);
     }
 
-    // Test different array layers
-    TEST_F(TextureSubresourceTest, ArrayLayersTest) {
-        // Create texture with 1 mipmap level and 2 layers
-        wgpu::Texture texture = CreateTexture(1, 2,
-                                              wgpu::TextureUsage::TextureBinding |
-                                                  wgpu::TextureUsage::RenderAttachment |
-                                                  wgpu::TextureUsage::StorageBinding);
-
-        // Create two views on different layers.
-        wgpu::TextureView samplerView = CreateTextureView(texture, 0, 0);
-        wgpu::TextureView renderView = CreateTextureView(texture, 0, 1);
-
-        TestRenderPass(samplerView, renderView);
+    wgpu::TextureView CreateTextureView(wgpu::Texture texture,
+                                        uint32_t baseMipLevel,
+                                        uint32_t baseArrayLayer) {
+        wgpu::TextureViewDescriptor viewDesc;
+        viewDesc.format = kFormat;
+        viewDesc.baseArrayLayer = baseArrayLayer;
+        viewDesc.arrayLayerCount = 1;
+        viewDesc.baseMipLevel = baseMipLevel;
+        viewDesc.mipLevelCount = 1;
+        viewDesc.dimension = wgpu::TextureViewDimension::e2D;
+        return texture.CreateView(&viewDesc);
     }
 
+    void TestRenderPass(const wgpu::TextureView& renderView, const wgpu::TextureView& samplerView) {
+        // Create bind group
+        wgpu::BindGroupLayout bgl = utils::MakeBindGroupLayout(
+            device, {{0, wgpu::ShaderStage::Vertex, wgpu::TextureSampleType::Float}});
+
+        utils::ComboRenderPassDescriptor renderPassDesc({renderView});
+
+        // It is valid to read from and write into different subresources of the same texture
+        {
+            wgpu::BindGroup bindGroup = utils::MakeBindGroup(device, bgl, {{0, samplerView}});
+            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+            wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPassDesc);
+            pass.SetBindGroup(0, bindGroup);
+            pass.End();
+            encoder.Finish();
+        }
+
+        // It is not currently possible to test that it is valid to have multiple reads from a
+        // subresource while there is a single write in another subresource.
+
+        // It is invalid to read and write into the same subresources
+        {
+            wgpu::BindGroup bindGroup = utils::MakeBindGroup(device, bgl, {{0, renderView}});
+            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+            wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPassDesc);
+            pass.SetBindGroup(0, bindGroup);
+            pass.End();
+            ASSERT_DEVICE_ERROR(encoder.Finish());
+        }
+
+        // It is valid to write into and then read from the same level of a texture in different
+        // render passes
+        {
+            wgpu::BindGroup bindGroup = utils::MakeBindGroup(device, bgl, {{0, samplerView}});
+
+            wgpu::BindGroupLayout bgl1 = utils::MakeBindGroupLayout(
+                device,
+                {{0, wgpu::ShaderStage::Fragment, wgpu::StorageTextureAccess::WriteOnly, kFormat}});
+            wgpu::BindGroup bindGroup1 = utils::MakeBindGroup(device, bgl1, {{0, samplerView}});
+
+            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+            wgpu::RenderPassEncoder pass1 = encoder.BeginRenderPass(&renderPassDesc);
+            pass1.SetBindGroup(0, bindGroup1);
+            pass1.End();
+
+            wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPassDesc);
+            pass.SetBindGroup(0, bindGroup);
+            pass.End();
+
+            encoder.Finish();
+        }
+    }
+};
+
+// Test different mipmap levels
+TEST_F(TextureSubresourceTest, MipmapLevelsTest) {
+    // Create texture with 2 mipmap levels and 1 layer
+    wgpu::Texture texture =
+        CreateTexture(2, 1,
+                      wgpu::TextureUsage::TextureBinding | wgpu::TextureUsage::RenderAttachment |
+                          wgpu::TextureUsage::StorageBinding);
+
+    // Create two views on different mipmap levels.
+    wgpu::TextureView samplerView = CreateTextureView(texture, 0, 0);
+    wgpu::TextureView renderView = CreateTextureView(texture, 1, 0);
+    TestRenderPass(samplerView, renderView);
+}
+
+// Test different array layers
+TEST_F(TextureSubresourceTest, ArrayLayersTest) {
+    // Create texture with 1 mipmap level and 2 layers
+    wgpu::Texture texture =
+        CreateTexture(1, 2,
+                      wgpu::TextureUsage::TextureBinding | wgpu::TextureUsage::RenderAttachment |
+                          wgpu::TextureUsage::StorageBinding);
+
+    // Create two views on different layers.
+    wgpu::TextureView samplerView = CreateTextureView(texture, 0, 0);
+    wgpu::TextureView renderView = CreateTextureView(texture, 0, 1);
+
+    TestRenderPass(samplerView, renderView);
+}
+
 }  // anonymous namespace
diff --git a/src/dawn/tests/unittests/validation/TextureValidationTests.cpp b/src/dawn/tests/unittests/validation/TextureValidationTests.cpp
index ac0192e..3de9c273 100644
--- a/src/dawn/tests/unittests/validation/TextureValidationTests.cpp
+++ b/src/dawn/tests/unittests/validation/TextureValidationTests.cpp
@@ -22,890 +22,888 @@
 
 namespace {
 
-    constexpr wgpu::TextureFormat kNonRenderableColorFormats[] = {
-        wgpu::TextureFormat::RG11B10Ufloat, wgpu::TextureFormat::RGB9E5Ufloat,
-        wgpu::TextureFormat::R8Snorm,       wgpu::TextureFormat::RG8Snorm,
-        wgpu::TextureFormat::RGBA8Snorm,
-    };
+constexpr wgpu::TextureFormat kNonRenderableColorFormats[] = {
+    wgpu::TextureFormat::RG11B10Ufloat, wgpu::TextureFormat::RGB9E5Ufloat,
+    wgpu::TextureFormat::R8Snorm,       wgpu::TextureFormat::RG8Snorm,
+    wgpu::TextureFormat::RGBA8Snorm,
+};
 
-    wgpu::TextureDimension kDimensions[] = {
-        wgpu::TextureDimension::e1D,
-        wgpu::TextureDimension::e3D,
-    };
+wgpu::TextureDimension kDimensions[] = {
+    wgpu::TextureDimension::e1D,
+    wgpu::TextureDimension::e3D,
+};
 
-    class TextureValidationTest : public ValidationTest {
-      protected:
-        void SetUp() override {
-            ValidationTest::SetUp();
+class TextureValidationTest : public ValidationTest {
+  protected:
+    void SetUp() override {
+        ValidationTest::SetUp();
 
-            queue = device.GetQueue();
-        }
-
-        wgpu::TextureDescriptor CreateDefaultTextureDescriptor() {
-            wgpu::TextureDescriptor descriptor;
-            descriptor.size.width = kWidth;
-            descriptor.size.height = kHeight;
-            descriptor.size.depthOrArrayLayers = kDefaultDepth;
-            descriptor.mipLevelCount = kDefaultMipLevels;
-            descriptor.sampleCount = kDefaultSampleCount;
-            descriptor.dimension = wgpu::TextureDimension::e2D;
-            descriptor.format = kDefaultTextureFormat;
-            descriptor.usage =
-                wgpu::TextureUsage::RenderAttachment | wgpu::TextureUsage::TextureBinding;
-            return descriptor;
-        }
-
-        wgpu::Queue queue;
-
-      private:
-        // Choose the LCM of all current compressed texture format texel dimensions as the
-        // dimensions of the default texture.
-        static constexpr uint32_t kWidth = 120;
-        static constexpr uint32_t kHeight = 120;
-        static constexpr uint32_t kDefaultDepth = 1;
-        static constexpr uint32_t kDefaultMipLevels = 1;
-        static constexpr uint32_t kDefaultSampleCount = 1;
-
-        static constexpr wgpu::TextureFormat kDefaultTextureFormat =
-            wgpu::TextureFormat::RGBA8Unorm;
-    };
-
-    // Test the validation of non-zero texture usage
-    TEST_F(TextureValidationTest, UsageNonZero) {
-        wgpu::TextureDescriptor descriptor = CreateDefaultTextureDescriptor();
-
-        // Descriptor with proper usage is allowed
-        {
-            descriptor.usage = wgpu::TextureUsage::RenderAttachment;
-
-            device.CreateTexture(&descriptor);
-        }
-
-        // It is an error to create a texture with zero usage
-        {
-            descriptor.usage = wgpu::TextureUsage::None;
-
-            ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
-        }
+        queue = device.GetQueue();
     }
 
-    // Test the validation of sample count
-    TEST_F(TextureValidationTest, SampleCount) {
-        wgpu::TextureDescriptor defaultDescriptor = CreateDefaultTextureDescriptor();
-
-        // sampleCount == 1 is allowed.
-        {
-            wgpu::TextureDescriptor descriptor = defaultDescriptor;
-            descriptor.sampleCount = 1;
-
-            device.CreateTexture(&descriptor);
-        }
-
-        // sampleCount == 4 is allowed.
-        {
-            wgpu::TextureDescriptor descriptor = defaultDescriptor;
-            descriptor.sampleCount = 4;
-
-            device.CreateTexture(&descriptor);
-        }
-
-        // It is an error to create a texture with an invalid sampleCount.
-        {
-            wgpu::TextureDescriptor descriptor = defaultDescriptor;
-            descriptor.sampleCount = 3;
-
-            ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
-        }
-
-        // It is an error to create a multisampled texture with mipLevelCount > 1.
-        {
-            wgpu::TextureDescriptor descriptor = defaultDescriptor;
-            descriptor.sampleCount = 4;
-            descriptor.mipLevelCount = 2;
-
-            ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
-        }
-
-        // It is an error to create a multisampled 1D or 3D texture.
-        {
-            wgpu::TextureDescriptor descriptor = defaultDescriptor;
-            descriptor.sampleCount = 4;
-
-            descriptor.size.height = 1;
-            descriptor.dimension = wgpu::TextureDimension::e1D;
-            ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
-
-            descriptor.dimension = wgpu::TextureDimension::e3D;
-            ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
-        }
-
-        // It is an error to create a multisample texture when the format cannot support
-        // multisample.
-        {
-            wgpu::TextureDescriptor descriptor = defaultDescriptor;
-            descriptor.sampleCount = 4;
-            descriptor.usage = wgpu::TextureUsage::TextureBinding;
-
-            for (wgpu::TextureFormat format : utils::kFormatsInCoreSpec) {
-                descriptor.format = format;
-                if (utils::TextureFormatSupportsMultisampling(format)) {
-                    device.CreateTexture(&descriptor);
-                } else {
-                    ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
-                }
-            }
-        }
-
-        // Currently we do not support multisampled 2D textures with depth > 1.
-        {
-            wgpu::TextureDescriptor descriptor = defaultDescriptor;
-            descriptor.sampleCount = 4;
-            descriptor.size.depthOrArrayLayers = 2;
-
-            ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
-        }
-
-        // It is an error to set TextureUsage::StorageBinding when sampleCount > 1.
-        {
-            wgpu::TextureDescriptor descriptor = defaultDescriptor;
-            descriptor.sampleCount = 4;
-            descriptor.usage |= wgpu::TextureUsage::StorageBinding;
-
-            ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
-        }
+    wgpu::TextureDescriptor CreateDefaultTextureDescriptor() {
+        wgpu::TextureDescriptor descriptor;
+        descriptor.size.width = kWidth;
+        descriptor.size.height = kHeight;
+        descriptor.size.depthOrArrayLayers = kDefaultDepth;
+        descriptor.mipLevelCount = kDefaultMipLevels;
+        descriptor.sampleCount = kDefaultSampleCount;
+        descriptor.dimension = wgpu::TextureDimension::e2D;
+        descriptor.format = kDefaultTextureFormat;
+        descriptor.usage =
+            wgpu::TextureUsage::RenderAttachment | wgpu::TextureUsage::TextureBinding;
+        return descriptor;
     }
 
-    // Test the validation of the mip level count
-    TEST_F(TextureValidationTest, MipLevelCount) {
-        wgpu::TextureDescriptor defaultDescriptor = CreateDefaultTextureDescriptor();
-        defaultDescriptor.usage = wgpu::TextureUsage::TextureBinding;
+    wgpu::Queue queue;
 
-        // mipLevelCount == 1 is allowed
-        {
-            wgpu::TextureDescriptor descriptor = defaultDescriptor;
-            descriptor.size.width = 32;
-            descriptor.size.height = 32;
-            descriptor.mipLevelCount = 1;
+  private:
+    // Choose the LCM of all current compressed texture format texel dimensions as the
+    // dimensions of the default texture.
+    static constexpr uint32_t kWidth = 120;
+    static constexpr uint32_t kHeight = 120;
+    static constexpr uint32_t kDefaultDepth = 1;
+    static constexpr uint32_t kDefaultMipLevels = 1;
+    static constexpr uint32_t kDefaultSampleCount = 1;
 
-            device.CreateTexture(&descriptor);
-        }
+    static constexpr wgpu::TextureFormat kDefaultTextureFormat = wgpu::TextureFormat::RGBA8Unorm;
+};
 
-        // mipLevelCount == 0 is an error
-        {
-            wgpu::TextureDescriptor descriptor = defaultDescriptor;
-            descriptor.size.width = 32;
-            descriptor.size.height = 32;
-            descriptor.mipLevelCount = 0;
+// Test the validation of non-zero texture usage
+TEST_F(TextureValidationTest, UsageNonZero) {
+    wgpu::TextureDescriptor descriptor = CreateDefaultTextureDescriptor();
 
-            ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
-        }
+    // Descriptor with proper usage is allowed
+    {
+        descriptor.usage = wgpu::TextureUsage::RenderAttachment;
 
-        // Full mip chains are allowed
-        {
-            wgpu::TextureDescriptor descriptor = defaultDescriptor;
-            descriptor.size.width = 32;
-            descriptor.size.height = 32;
-            // Mip level sizes: 32, 16, 8, 4, 2, 1
-            descriptor.mipLevelCount = 6;
-
-            device.CreateTexture(&descriptor);
-        }
-
-        // Test non-power-of-two width
-        {
-            wgpu::TextureDescriptor descriptor = defaultDescriptor;
-            // Mip level width: 31, 15, 7, 3, 1
-            descriptor.size.width = 31;
-            descriptor.size.height = 4;
-
-            // Full mip chains on non-power-of-two width are allowed
-            descriptor.mipLevelCount = 5;
-            device.CreateTexture(&descriptor);
-
-            // Too big mip chains on non-power-of-two width are disallowed
-            descriptor.mipLevelCount = 6;
-            ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
-        }
-
-        // Test non-power-of-two height
-        {
-            wgpu::TextureDescriptor descriptor = defaultDescriptor;
-            descriptor.size.width = 4;
-            // Mip level height: 31, 15, 7, 3, 1
-            descriptor.size.height = 31;
-
-            // Full mip chains on non-power-of-two height are allowed
-            descriptor.mipLevelCount = 5;
-            device.CreateTexture(&descriptor);
-
-            // Too big mip chains on non-power-of-two height are disallowed
-            descriptor.mipLevelCount = 6;
-            ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
-        }
-
-        // Undefined shift check if miplevel is bigger than the integer bit width.
-        {
-            wgpu::TextureDescriptor descriptor = defaultDescriptor;
-            descriptor.size.width = 32;
-            descriptor.size.height = 32;
-            descriptor.mipLevelCount = 100;
-
-            ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
-        }
-
-        // Non square mip map halves the resolution until a 1x1 dimension
-        {
-            wgpu::TextureDescriptor descriptor = defaultDescriptor;
-            descriptor.size.width = 32;
-            descriptor.size.height = 8;
-            // Mip maps: 32 * 8, 16 * 4, 8 * 2, 4 * 1, 2 * 1, 1 * 1
-            descriptor.mipLevelCount = 6;
-
-            device.CreateTexture(&descriptor);
-        }
-
-        // Non square mip map for a 3D textures
-        {
-            wgpu::TextureDescriptor descriptor = defaultDescriptor;
-            descriptor.size.width = 32;
-            descriptor.size.height = 8;
-            descriptor.size.depthOrArrayLayers = 64;
-            descriptor.dimension = wgpu::TextureDimension::e3D;
-            // Non square mip map halves width, height and depth until a 1x1x1 dimension for a 3D
-            // texture. So there are 7 mipmaps at most: 32 * 8 * 64, 16 * 4 * 32, 8 * 2 * 16,
-            // 4 * 1 * 8, 2 * 1 * 4, 1 * 1 * 2, 1 * 1 * 1.
-            descriptor.mipLevelCount = 7;
-            device.CreateTexture(&descriptor);
-        }
-
-        // Non square mip map for 2D textures with depth > 1
-        {
-            wgpu::TextureDescriptor descriptor = defaultDescriptor;
-            descriptor.size.width = 32;
-            descriptor.size.height = 8;
-            descriptor.size.depthOrArrayLayers = 64;
-            // Non square mip map halves width and height until a 1x1 dimension for a 2D texture,
-            // even its depth > 1. So there are 6 mipmaps at most: 32 * 8, 16 * 4, 8 * 2, 4 * 1, 2 *
-            // 1, 1 * 1.
-            descriptor.dimension = wgpu::TextureDimension::e2D;
-            descriptor.mipLevelCount = 7;
-            ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
-            descriptor.mipLevelCount = 6;
-            device.CreateTexture(&descriptor);
-        }
-
-        // Mip level equal to the maximum for a 2D texture is allowed
-        {
-            uint32_t maxTextureDimension2D = GetSupportedLimits().limits.maxTextureDimension2D;
-            wgpu::TextureDescriptor descriptor = defaultDescriptor;
-            descriptor.size.width = maxTextureDimension2D;
-            descriptor.size.height = maxTextureDimension2D;
-            descriptor.mipLevelCount = Log2(maxTextureDimension2D) + 1u;
-
-            device.CreateTexture(&descriptor);
-        }
-
-        // Mip level exceeding the maximum for a 2D texture not allowed
-        {
-            uint32_t maxTextureDimension2D = GetSupportedLimits().limits.maxTextureDimension2D;
-            wgpu::TextureDescriptor descriptor = defaultDescriptor;
-            descriptor.size.width = maxTextureDimension2D;
-            descriptor.size.height = maxTextureDimension2D;
-            descriptor.mipLevelCount = Log2(maxTextureDimension2D) + 2u;
-
-            ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
-        }
-
-        // 1D textures can only have a single mip level.
-        {
-            wgpu::TextureDescriptor descriptor = defaultDescriptor;
-            descriptor.dimension = wgpu::TextureDimension::e1D;
-            descriptor.size.width = 32;
-            descriptor.size.height = 1;
-
-            // Having a single mip level is allowed.
-            descriptor.mipLevelCount = 1;
-            device.CreateTexture(&descriptor);
-
-            // Having more than 1 is an error.
-            descriptor.mipLevelCount = 2;
-            ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
-        }
+        device.CreateTexture(&descriptor);
     }
 
-    // Test the validation of array layer count
-    TEST_F(TextureValidationTest, ArrayLayerCount) {
-        wgpu::TextureDescriptor defaultDescriptor = CreateDefaultTextureDescriptor();
-        wgpu::Limits supportedLimits = GetSupportedLimits().limits;
+    // It is an error to create a texture with zero usage
+    {
+        descriptor.usage = wgpu::TextureUsage::None;
 
-        // Array layer count exceeding maxTextureArrayLayers is not allowed for 2D texture
-        {
-            wgpu::TextureDescriptor descriptor = defaultDescriptor;
+        ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
+    }
+}
 
-            descriptor.size.depthOrArrayLayers = supportedLimits.maxTextureArrayLayers + 1u;
-            ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
-        }
+// Test the validation of sample count
+TEST_F(TextureValidationTest, SampleCount) {
+    wgpu::TextureDescriptor defaultDescriptor = CreateDefaultTextureDescriptor();
 
-        // Array layer count less than maxTextureArrayLayers is allowed
-        {
-            wgpu::TextureDescriptor descriptor = defaultDescriptor;
-            descriptor.size.depthOrArrayLayers = supportedLimits.maxTextureArrayLayers >> 1;
-            device.CreateTexture(&descriptor);
-        }
+    // sampleCount == 1 is allowed.
+    {
+        wgpu::TextureDescriptor descriptor = defaultDescriptor;
+        descriptor.sampleCount = 1;
 
-        // Array layer count equal to maxTextureArrayLayers is allowed
-        {
-            wgpu::TextureDescriptor descriptor = defaultDescriptor;
-            descriptor.size.depthOrArrayLayers = supportedLimits.maxTextureArrayLayers;
-            device.CreateTexture(&descriptor);
-        }
+        device.CreateTexture(&descriptor);
     }
 
-    // Test the validation of 1D texture size
-    TEST_F(TextureValidationTest, 1DTextureSize) {
-        wgpu::Limits supportedLimits = GetSupportedLimits().limits;
+    // sampleCount == 4 is allowed.
+    {
+        wgpu::TextureDescriptor descriptor = defaultDescriptor;
+        descriptor.sampleCount = 4;
 
-        wgpu::TextureDescriptor defaultDescriptor;
-        defaultDescriptor.size = {4, 1, 1};
-        defaultDescriptor.dimension = wgpu::TextureDimension::e1D;
-        defaultDescriptor.usage = wgpu::TextureUsage::CopySrc;
-        defaultDescriptor.format = wgpu::TextureFormat::RGBA8Unorm;
-
-        // Width must be in [1, kMaxTextureDimension1D]
-        {
-            wgpu::TextureDescriptor desc = defaultDescriptor;
-            desc.size.width = 0;
-            ASSERT_DEVICE_ERROR(device.CreateTexture(&desc));
-            desc.size.width = 1;
-            device.CreateTexture(&desc);
-
-            desc.size.width = supportedLimits.maxTextureDimension1D;
-            device.CreateTexture(&desc);
-            desc.size.width = supportedLimits.maxTextureDimension1D + 1u;
-            ASSERT_DEVICE_ERROR(device.CreateTexture(&desc));
-        }
-
-        // Height must be 1
-        {
-            wgpu::TextureDescriptor desc = defaultDescriptor;
-            desc.size.height = 2;
-            ASSERT_DEVICE_ERROR(device.CreateTexture(&desc));
-
-            desc.size.height = 0;
-            ASSERT_DEVICE_ERROR(device.CreateTexture(&desc));
-        }
-
-        // DepthOrArrayLayers must be 1
-        {
-            wgpu::TextureDescriptor desc = defaultDescriptor;
-            desc.size.depthOrArrayLayers = 2;
-            ASSERT_DEVICE_ERROR(device.CreateTexture(&desc));
-
-            desc.size.depthOrArrayLayers = 0;
-            ASSERT_DEVICE_ERROR(device.CreateTexture(&desc));
-        }
+        device.CreateTexture(&descriptor);
     }
 
-    // Test the validation of 2D texture size
-    TEST_F(TextureValidationTest, 2DTextureSize) {
-        wgpu::TextureDescriptor defaultDescriptor = CreateDefaultTextureDescriptor();
-        wgpu::Limits supportedLimits = GetSupportedLimits().limits;
+    // It is an error to create a texture with an invalid sampleCount.
+    {
+        wgpu::TextureDescriptor descriptor = defaultDescriptor;
+        descriptor.sampleCount = 3;
 
-        // Out-of-bound texture dimension is not allowed
-        {
-            wgpu::TextureDescriptor descriptor = defaultDescriptor;
-            descriptor.size.width = supportedLimits.maxTextureDimension2D + 1u;
-            ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
-
-            descriptor.size.width = 1;
-            descriptor.size.height = supportedLimits.maxTextureDimension2D + 1u;
-            ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
-        }
-
-        // Zero-sized texture is not allowed
-        {
-            wgpu::TextureDescriptor descriptor = defaultDescriptor;
-            descriptor.size = {0, 1, 1};
-            ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
-
-            descriptor.size = {1, 0, 1};
-            ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
-
-            descriptor.size = {1, 1, 0};
-            // 2D texture with depth=0 is not allowed
-            ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
-        }
-
-        // Texture size less than max dimension is allowed
-        {
-            wgpu::TextureDescriptor descriptor = defaultDescriptor;
-            descriptor.size.width = supportedLimits.maxTextureDimension2D >> 1;
-            descriptor.size.height = supportedLimits.maxTextureDimension2D >> 1;
-            device.CreateTexture(&descriptor);
-        }
-
-        // Texture size equal to max dimension is allowed
-        {
-            wgpu::TextureDescriptor descriptor = defaultDescriptor;
-            descriptor.size.width = supportedLimits.maxTextureDimension2D;
-            descriptor.size.height = supportedLimits.maxTextureDimension2D;
-            descriptor.dimension = wgpu::TextureDimension::e2D;
-            device.CreateTexture(&descriptor);
-        }
+        ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
     }
 
-    // Test the validation of 3D texture size
-    TEST_F(TextureValidationTest, 3DTextureSize) {
-        wgpu::TextureDescriptor defaultDescriptor = CreateDefaultTextureDescriptor();
-        defaultDescriptor.dimension = wgpu::TextureDimension::e3D;
-        defaultDescriptor.usage = wgpu::TextureUsage::TextureBinding;
-        wgpu::Limits supportedLimits = GetSupportedLimits().limits;
+    // It is an error to create a multisampled texture with mipLevelCount > 1.
+    {
+        wgpu::TextureDescriptor descriptor = defaultDescriptor;
+        descriptor.sampleCount = 4;
+        descriptor.mipLevelCount = 2;
 
-        // Out-of-bound texture dimension is not allowed
-        {
-            wgpu::TextureDescriptor descriptor = defaultDescriptor;
-
-            descriptor.size = {supportedLimits.maxTextureDimension3D + 1u, 1, 1};
-            ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
-
-            descriptor.size = {1, supportedLimits.maxTextureDimension3D + 1u, 1};
-            ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
-
-            descriptor.size = {1, 1, supportedLimits.maxTextureDimension3D + 1u};
-            ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
-        }
-
-        // Zero-sized texture is not allowed
-        {
-            wgpu::TextureDescriptor descriptor = defaultDescriptor;
-
-            descriptor.size = {0, 1, 1};
-            ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
-
-            descriptor.size = {1, 0, 1};
-            ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
-
-            descriptor.size = {1, 1, 0};
-            ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
-        }
-
-        // Texture size less than max dimension is allowed
-        {
-            wgpu::TextureDescriptor descriptor = defaultDescriptor;
-
-            descriptor.size = {supportedLimits.maxTextureDimension3D >> 1,
-                               supportedLimits.maxTextureDimension3D >> 1,
-                               supportedLimits.maxTextureDimension3D >> 1};
-            device.CreateTexture(&descriptor);
-        }
-
-        // Texture size equal to max dimension is allowed
-        {
-            wgpu::TextureDescriptor descriptor = defaultDescriptor;
-
-            descriptor.size = {supportedLimits.maxTextureDimension3D,
-                               supportedLimits.maxTextureDimension3D,
-                               supportedLimits.maxTextureDimension3D};
-            device.CreateTexture(&descriptor);
-        }
+        ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
     }
 
-    // Test that depth/stencil formats are invalid for 1D and 3D texture
-    TEST_F(TextureValidationTest, DepthStencilFormatsFor1DAnd3D) {
-        wgpu::TextureDescriptor descriptor = CreateDefaultTextureDescriptor();
+    // It is an error to create a multisampled 1D or 3D texture.
+    {
+        wgpu::TextureDescriptor descriptor = defaultDescriptor;
+        descriptor.sampleCount = 4;
 
-        wgpu::TextureFormat depthStencilFormats[] = {
-            wgpu::TextureFormat::Stencil8,     wgpu::TextureFormat::Depth16Unorm,
-            wgpu::TextureFormat::Depth24Plus,  wgpu::TextureFormat::Depth24PlusStencil8,
-            wgpu::TextureFormat::Depth32Float,
-        };
+        descriptor.size.height = 1;
+        descriptor.dimension = wgpu::TextureDimension::e1D;
+        ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
 
-        for (wgpu::TextureDimension dimension : kDimensions) {
-            for (wgpu::TextureFormat format : depthStencilFormats) {
-                descriptor.format = format;
-                descriptor.dimension = dimension;
+        descriptor.dimension = wgpu::TextureDimension::e3D;
+        ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
+    }
+
+    // It is an error to create a multisampled texture when the format does not support
+    // multisampling.
+    {
+        wgpu::TextureDescriptor descriptor = defaultDescriptor;
+        descriptor.sampleCount = 4;
+        descriptor.usage = wgpu::TextureUsage::TextureBinding;
+
+        for (wgpu::TextureFormat format : utils::kFormatsInCoreSpec) {
+            descriptor.format = format;
+            if (utils::TextureFormatSupportsMultisampling(format)) {
+                device.CreateTexture(&descriptor);
+            } else {
                 ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
             }
         }
     }
 
-    // Test that it is valid to destroy a texture
-    TEST_F(TextureValidationTest, DestroyTexture) {
-        wgpu::TextureDescriptor descriptor = CreateDefaultTextureDescriptor();
-        wgpu::Texture texture = device.CreateTexture(&descriptor);
-        texture.Destroy();
+    // Currently we do not support multisampled 2D textures with depth > 1.
+    {
+        wgpu::TextureDescriptor descriptor = defaultDescriptor;
+        descriptor.sampleCount = 4;
+        descriptor.size.depthOrArrayLayers = 2;
+
+        ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
     }
 
-    // Test that it's valid to destroy a destroyed texture
-    TEST_F(TextureValidationTest, DestroyDestroyedTexture) {
-        wgpu::TextureDescriptor descriptor = CreateDefaultTextureDescriptor();
-        wgpu::Texture texture = device.CreateTexture(&descriptor);
-        texture.Destroy();
-        texture.Destroy();
+    // It is an error to set TextureUsage::StorageBinding when sampleCount > 1.
+    {
+        wgpu::TextureDescriptor descriptor = defaultDescriptor;
+        descriptor.sampleCount = 4;
+        descriptor.usage |= wgpu::TextureUsage::StorageBinding;
+
+        ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
+    }
+}
+
+// Test the validation of the mip level count
+TEST_F(TextureValidationTest, MipLevelCount) {
+    wgpu::TextureDescriptor defaultDescriptor = CreateDefaultTextureDescriptor();
+    defaultDescriptor.usage = wgpu::TextureUsage::TextureBinding;
+
+    // mipLevelCount == 1 is allowed
+    {
+        wgpu::TextureDescriptor descriptor = defaultDescriptor;
+        descriptor.size.width = 32;
+        descriptor.size.height = 32;
+        descriptor.mipLevelCount = 1;
+
+        device.CreateTexture(&descriptor);
     }
 
-    // Test that it's invalid to submit a destroyed texture in a queue
-    // in the case of destroy, encode, submit
-    TEST_F(TextureValidationTest, DestroyEncodeSubmit) {
-        wgpu::TextureDescriptor descriptor = CreateDefaultTextureDescriptor();
-        wgpu::Texture texture = device.CreateTexture(&descriptor);
-        wgpu::TextureView textureView = texture.CreateView();
+    // mipLevelCount == 0 is an error
+    {
+        wgpu::TextureDescriptor descriptor = defaultDescriptor;
+        descriptor.size.width = 32;
+        descriptor.size.height = 32;
+        descriptor.mipLevelCount = 0;
 
-        utils::ComboRenderPassDescriptor renderPass({textureView});
-
-        // Destroy the texture
-        texture.Destroy();
-
-        wgpu::CommandEncoder encoder_post_destroy = device.CreateCommandEncoder();
-        {
-            wgpu::RenderPassEncoder pass = encoder_post_destroy.BeginRenderPass(&renderPass);
-            pass.End();
-        }
-        wgpu::CommandBuffer commands = encoder_post_destroy.Finish();
-
-        // Submit should fail due to destroyed texture
-        ASSERT_DEVICE_ERROR(queue.Submit(1, &commands));
+        ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
     }
 
-    // Test that it's invalid to submit a destroyed texture in a queue
-    // in the case of encode, destroy, submit
-    TEST_F(TextureValidationTest, EncodeDestroySubmit) {
-        wgpu::TextureDescriptor descriptor = CreateDefaultTextureDescriptor();
-        wgpu::Texture texture = device.CreateTexture(&descriptor);
-        wgpu::TextureView textureView = texture.CreateView();
+    // Full mip chains are allowed
+    {
+        wgpu::TextureDescriptor descriptor = defaultDescriptor;
+        descriptor.size.width = 32;
+        descriptor.size.height = 32;
+        // Mip level sizes: 32, 16, 8, 4, 2, 1
+        descriptor.mipLevelCount = 6;
 
-        utils::ComboRenderPassDescriptor renderPass({textureView});
-
-        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-        {
-            wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass);
-            pass.End();
-        }
-        wgpu::CommandBuffer commands = encoder.Finish();
-
-        // Destroy the texture
-        texture.Destroy();
-
-        // Submit should fail due to destroyed texture
-        ASSERT_DEVICE_ERROR(queue.Submit(1, &commands));
+        device.CreateTexture(&descriptor);
     }
 
-    // Test it is an error to create an RenderAttachment texture with a non-renderable format.
-    TEST_F(TextureValidationTest, NonRenderableAndRenderAttachment) {
-        wgpu::TextureDescriptor descriptor;
-        descriptor.size = {1, 1, 1};
-        descriptor.usage = wgpu::TextureUsage::RenderAttachment;
+    // Test non-power-of-two width
+    {
+        wgpu::TextureDescriptor descriptor = defaultDescriptor;
+        // Mip level width: 31, 15, 7, 3, 1
+        descriptor.size.width = 31;
+        descriptor.size.height = 4;
 
-        // Succeeds because RGBA8Unorm is renderable
-        descriptor.format = wgpu::TextureFormat::RGBA8Unorm;
+        // Full mip chains on non-power-of-two width are allowed
+        descriptor.mipLevelCount = 5;
         device.CreateTexture(&descriptor);
 
-        for (wgpu::TextureFormat format : kNonRenderableColorFormats) {
-            // Fails because `format` is non-renderable
-            descriptor.format = format;
-            ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
-        }
-    }
-
-    // Test it is an error to create a Storage texture with any format that doesn't support
-    // TextureUsage::StorageBinding texture usages.
-    TEST_F(TextureValidationTest, TextureFormatNotSupportTextureUsageStorage) {
-        wgpu::TextureDescriptor descriptor;
-        descriptor.size = {1, 1, 1};
-        descriptor.usage = wgpu::TextureUsage::StorageBinding;
-
-        for (wgpu::TextureFormat format : utils::kAllTextureFormats) {
-            descriptor.format = format;
-            if (utils::TextureFormatSupportsStorageTexture(format)) {
-                device.CreateTexture(&descriptor);
-            } else {
-                ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
-            }
-        }
-    }
-
-    // Test it is an error to create a RenderAttachment texture with the texture dimensions that
-    // doesn't support TextureUsage::RenderAttachment texture usages.
-    TEST_F(TextureValidationTest, TextureDimensionNotSupportRenderAttachment) {
-        wgpu::TextureDescriptor descriptor;
-        descriptor.size = {1, 1, 1};
-        descriptor.format = wgpu::TextureFormat::RGBA8Unorm;
-        descriptor.usage = wgpu::TextureUsage::RenderAttachment;
-
-        constexpr std::array<wgpu::TextureDimension, 3> kTextureDimensions = {
-            {wgpu::TextureDimension::e1D, wgpu::TextureDimension::e2D,
-             wgpu::TextureDimension::e3D}};
-        for (wgpu::TextureDimension dimension : kTextureDimensions) {
-            descriptor.dimension = dimension;
-            if (dimension == wgpu::TextureDimension::e2D) {
-                device.CreateTexture(&descriptor);
-            } else {
-                ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
-            }
-        }
-    }
-
-    // Test it is an error to create a texture with format "Undefined".
-    TEST_F(TextureValidationTest, TextureFormatUndefined) {
-        wgpu::TextureDescriptor descriptor = CreateDefaultTextureDescriptor();
-        descriptor.format = wgpu::TextureFormat::Undefined;
+        // Too big mip chains on non-power-of-two width are disallowed
+        descriptor.mipLevelCount = 6;
         ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
     }
 
-    // Test that the creation of a texture with depth24unorm-stencil8 will fail when the feature
-    // Depth24UnormStencil8 is not enabled.
-    TEST_F(TextureValidationTest, UseD24S8FormatWithoutEnablingFeature) {
+    // Test non-power-of-two height
+    {
+        wgpu::TextureDescriptor descriptor = defaultDescriptor;
+        descriptor.size.width = 4;
+        // Mip level height: 31, 15, 7, 3, 1
+        descriptor.size.height = 31;
+
+        // Full mip chains on non-power-of-two height are allowed
+        descriptor.mipLevelCount = 5;
+        device.CreateTexture(&descriptor);
+
+        // Too big mip chains on non-power-of-two height are disallowed
+        descriptor.mipLevelCount = 6;
+        ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
+    }
+
+    // Check for undefined shift when mipLevelCount exceeds the integer bit width.
+    {
+        wgpu::TextureDescriptor descriptor = defaultDescriptor;
+        descriptor.size.width = 32;
+        descriptor.size.height = 32;
+        descriptor.mipLevelCount = 100;
+
+        ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
+    }
+
+    // Non square mip map halves the resolution until a 1x1 dimension
+    {
+        wgpu::TextureDescriptor descriptor = defaultDescriptor;
+        descriptor.size.width = 32;
+        descriptor.size.height = 8;
+        // Mip maps: 32 * 8, 16 * 4, 8 * 2, 4 * 1, 2 * 1, 1 * 1
+        descriptor.mipLevelCount = 6;
+
+        device.CreateTexture(&descriptor);
+    }
+
+    // Non square mip map for 3D textures
+    {
+        wgpu::TextureDescriptor descriptor = defaultDescriptor;
+        descriptor.size.width = 32;
+        descriptor.size.height = 8;
+        descriptor.size.depthOrArrayLayers = 64;
+        descriptor.dimension = wgpu::TextureDimension::e3D;
+        // Non square mip map halves width, height and depth until a 1x1x1 dimension for a 3D
+        // texture. So there are 7 mipmaps at most: 32 * 8 * 64, 16 * 4 * 32, 8 * 2 * 16,
+        // 4 * 1 * 8, 2 * 1 * 4, 1 * 1 * 2, 1 * 1 * 1.
+        descriptor.mipLevelCount = 7;
+        device.CreateTexture(&descriptor);
+    }
+
+    // Non square mip map for 2D textures with depth > 1
+    {
+        wgpu::TextureDescriptor descriptor = defaultDescriptor;
+        descriptor.size.width = 32;
+        descriptor.size.height = 8;
+        descriptor.size.depthOrArrayLayers = 64;
+        // Non square mip map halves width and height until a 1x1 dimension for a 2D texture,
+        // even if its depth > 1. So there are 6 mipmaps at most: 32 * 8, 16 * 4, 8 * 2,
+        // 4 * 1, 2 * 1, 1 * 1.
+        descriptor.dimension = wgpu::TextureDimension::e2D;
+        descriptor.mipLevelCount = 7;
+        ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
+        descriptor.mipLevelCount = 6;
+        device.CreateTexture(&descriptor);
+    }
+
+    // Mip level equal to the maximum for a 2D texture is allowed
+    {
+        uint32_t maxTextureDimension2D = GetSupportedLimits().limits.maxTextureDimension2D;
+        wgpu::TextureDescriptor descriptor = defaultDescriptor;
+        descriptor.size.width = maxTextureDimension2D;
+        descriptor.size.height = maxTextureDimension2D;
+        descriptor.mipLevelCount = Log2(maxTextureDimension2D) + 1u;
+
+        device.CreateTexture(&descriptor);
+    }
+
+    // Mip level exceeding the maximum for a 2D texture not allowed
+    {
+        uint32_t maxTextureDimension2D = GetSupportedLimits().limits.maxTextureDimension2D;
+        wgpu::TextureDescriptor descriptor = defaultDescriptor;
+        descriptor.size.width = maxTextureDimension2D;
+        descriptor.size.height = maxTextureDimension2D;
+        descriptor.mipLevelCount = Log2(maxTextureDimension2D) + 2u;
+
+        ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
+    }
+
+    // 1D textures can only have a single mip level.
+    {
+        wgpu::TextureDescriptor descriptor = defaultDescriptor;
+        descriptor.dimension = wgpu::TextureDimension::e1D;
+        descriptor.size.width = 32;
+        descriptor.size.height = 1;
+
+        // Having a single mip level is allowed.
+        descriptor.mipLevelCount = 1;
+        device.CreateTexture(&descriptor);
+
+        // Having more than 1 is an error.
+        descriptor.mipLevelCount = 2;
+        ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
+    }
+}
+
+// Test the validation of array layer count
+TEST_F(TextureValidationTest, ArrayLayerCount) {
+    wgpu::TextureDescriptor defaultDescriptor = CreateDefaultTextureDescriptor();
+    wgpu::Limits supportedLimits = GetSupportedLimits().limits;
+
+    // Array layer count exceeding maxTextureArrayLayers is not allowed for 2D texture
+    {
+        wgpu::TextureDescriptor descriptor = defaultDescriptor;
+
+        descriptor.size.depthOrArrayLayers = supportedLimits.maxTextureArrayLayers + 1u;
+        ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
+    }
+
+    // Array layer count less than maxTextureArrayLayers is allowed
+    {
+        wgpu::TextureDescriptor descriptor = defaultDescriptor;
+        descriptor.size.depthOrArrayLayers = supportedLimits.maxTextureArrayLayers >> 1;
+        device.CreateTexture(&descriptor);
+    }
+
+    // Array layer count equal to maxTextureArrayLayers is allowed
+    {
+        wgpu::TextureDescriptor descriptor = defaultDescriptor;
+        descriptor.size.depthOrArrayLayers = supportedLimits.maxTextureArrayLayers;
+        device.CreateTexture(&descriptor);
+    }
+}
+
+// Test the validation of 1D texture size
+TEST_F(TextureValidationTest, 1DTextureSize) {
+    wgpu::Limits supportedLimits = GetSupportedLimits().limits;
+
+    wgpu::TextureDescriptor defaultDescriptor;
+    defaultDescriptor.size = {4, 1, 1};
+    defaultDescriptor.dimension = wgpu::TextureDimension::e1D;
+    defaultDescriptor.usage = wgpu::TextureUsage::CopySrc;
+    defaultDescriptor.format = wgpu::TextureFormat::RGBA8Unorm;
+
+    // Width must be in [1, kMaxTextureDimension1D]
+    {
+        wgpu::TextureDescriptor desc = defaultDescriptor;
+        desc.size.width = 0;
+        ASSERT_DEVICE_ERROR(device.CreateTexture(&desc));
+        desc.size.width = 1;
+        device.CreateTexture(&desc);
+
+        desc.size.width = supportedLimits.maxTextureDimension1D;
+        device.CreateTexture(&desc);
+        desc.size.width = supportedLimits.maxTextureDimension1D + 1u;
+        ASSERT_DEVICE_ERROR(device.CreateTexture(&desc));
+    }
+
+    // Height must be 1
+    {
+        wgpu::TextureDescriptor desc = defaultDescriptor;
+        desc.size.height = 2;
+        ASSERT_DEVICE_ERROR(device.CreateTexture(&desc));
+
+        desc.size.height = 0;
+        ASSERT_DEVICE_ERROR(device.CreateTexture(&desc));
+    }
+
+    // DepthOrArrayLayers must be 1
+    {
+        wgpu::TextureDescriptor desc = defaultDescriptor;
+        desc.size.depthOrArrayLayers = 2;
+        ASSERT_DEVICE_ERROR(device.CreateTexture(&desc));
+
+        desc.size.depthOrArrayLayers = 0;
+        ASSERT_DEVICE_ERROR(device.CreateTexture(&desc));
+    }
+}
+
+// Test the validation of 2D texture size
+TEST_F(TextureValidationTest, 2DTextureSize) {
+    wgpu::TextureDescriptor defaultDescriptor = CreateDefaultTextureDescriptor();
+    wgpu::Limits supportedLimits = GetSupportedLimits().limits;
+
+    // Out-of-bound texture dimension is not allowed
+    {
+        wgpu::TextureDescriptor descriptor = defaultDescriptor;
+        descriptor.size.width = supportedLimits.maxTextureDimension2D + 1u;
+        ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
+
+        descriptor.size.width = 1;
+        descriptor.size.height = supportedLimits.maxTextureDimension2D + 1u;
+        ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
+    }
+
+    // Zero-sized texture is not allowed
+    {
+        wgpu::TextureDescriptor descriptor = defaultDescriptor;
+        descriptor.size = {0, 1, 1};
+        ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
+
+        descriptor.size = {1, 0, 1};
+        ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
+
+        descriptor.size = {1, 1, 0};
+        // 2D texture with depth=0 is not allowed
+        ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
+    }
+
+    // Texture size less than max dimension is allowed
+    {
+        wgpu::TextureDescriptor descriptor = defaultDescriptor;
+        descriptor.size.width = supportedLimits.maxTextureDimension2D >> 1;
+        descriptor.size.height = supportedLimits.maxTextureDimension2D >> 1;
+        device.CreateTexture(&descriptor);
+    }
+
+    // Texture size equal to max dimension is allowed
+    {
+        wgpu::TextureDescriptor descriptor = defaultDescriptor;
+        descriptor.size.width = supportedLimits.maxTextureDimension2D;
+        descriptor.size.height = supportedLimits.maxTextureDimension2D;
+        descriptor.dimension = wgpu::TextureDimension::e2D;
+        device.CreateTexture(&descriptor);
+    }
+}
+
+// Test the validation of 3D texture size
+TEST_F(TextureValidationTest, 3DTextureSize) {
+    wgpu::TextureDescriptor defaultDescriptor = CreateDefaultTextureDescriptor();
+    defaultDescriptor.dimension = wgpu::TextureDimension::e3D;
+    defaultDescriptor.usage = wgpu::TextureUsage::TextureBinding;
+    wgpu::Limits supportedLimits = GetSupportedLimits().limits;
+
+    // Out-of-bound texture dimension is not allowed
+    {
+        wgpu::TextureDescriptor descriptor = defaultDescriptor;
+
+        descriptor.size = {supportedLimits.maxTextureDimension3D + 1u, 1, 1};
+        ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
+
+        descriptor.size = {1, supportedLimits.maxTextureDimension3D + 1u, 1};
+        ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
+
+        descriptor.size = {1, 1, supportedLimits.maxTextureDimension3D + 1u};
+        ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
+    }
+
+    // Zero-sized texture is not allowed
+    {
+        wgpu::TextureDescriptor descriptor = defaultDescriptor;
+
+        descriptor.size = {0, 1, 1};
+        ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
+
+        descriptor.size = {1, 0, 1};
+        ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
+
+        descriptor.size = {1, 1, 0};
+        ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
+    }
+
+    // Texture size less than max dimension is allowed
+    {
+        wgpu::TextureDescriptor descriptor = defaultDescriptor;
+
+        descriptor.size = {supportedLimits.maxTextureDimension3D >> 1,
+                           supportedLimits.maxTextureDimension3D >> 1,
+                           supportedLimits.maxTextureDimension3D >> 1};
+        device.CreateTexture(&descriptor);
+    }
+
+    // Texture size equal to max dimension is allowed
+    {
+        wgpu::TextureDescriptor descriptor = defaultDescriptor;
+
+        descriptor.size = {supportedLimits.maxTextureDimension3D,
+                           supportedLimits.maxTextureDimension3D,
+                           supportedLimits.maxTextureDimension3D};
+        device.CreateTexture(&descriptor);
+    }
+}
+
+// Test that depth/stencil formats are invalid for 1D and 3D texture
+TEST_F(TextureValidationTest, DepthStencilFormatsFor1DAnd3D) {
+    wgpu::TextureDescriptor descriptor = CreateDefaultTextureDescriptor();
+
+    wgpu::TextureFormat depthStencilFormats[] = {
+        wgpu::TextureFormat::Stencil8,     wgpu::TextureFormat::Depth16Unorm,
+        wgpu::TextureFormat::Depth24Plus,  wgpu::TextureFormat::Depth24PlusStencil8,
+        wgpu::TextureFormat::Depth32Float,
+    };
+
+    for (wgpu::TextureDimension dimension : kDimensions) {
+        for (wgpu::TextureFormat format : depthStencilFormats) {
+            descriptor.format = format;
+            descriptor.dimension = dimension;
+            ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
+        }
+    }
+}
+
+// Test that it is valid to destroy a texture
+TEST_F(TextureValidationTest, DestroyTexture) {
+    wgpu::TextureDescriptor descriptor = CreateDefaultTextureDescriptor();
+    wgpu::Texture texture = device.CreateTexture(&descriptor);
+    texture.Destroy();
+}
+
+// Test that it's valid to destroy a destroyed texture
+TEST_F(TextureValidationTest, DestroyDestroyedTexture) {
+    wgpu::TextureDescriptor descriptor = CreateDefaultTextureDescriptor();
+    wgpu::Texture texture = device.CreateTexture(&descriptor);
+    texture.Destroy();
+    texture.Destroy();
+}
+
+// Test that it's invalid to submit a destroyed texture in a queue
+// in the case of destroy, encode, submit
+TEST_F(TextureValidationTest, DestroyEncodeSubmit) {
+    wgpu::TextureDescriptor descriptor = CreateDefaultTextureDescriptor();
+    wgpu::Texture texture = device.CreateTexture(&descriptor);
+    wgpu::TextureView textureView = texture.CreateView();
+
+    utils::ComboRenderPassDescriptor renderPass({textureView});
+
+    // Destroy the texture
+    texture.Destroy();
+
+    wgpu::CommandEncoder encoder_post_destroy = device.CreateCommandEncoder();
+    {
+        wgpu::RenderPassEncoder pass = encoder_post_destroy.BeginRenderPass(&renderPass);
+        pass.End();
+    }
+    wgpu::CommandBuffer commands = encoder_post_destroy.Finish();
+
+    // Submit should fail due to destroyed texture
+    ASSERT_DEVICE_ERROR(queue.Submit(1, &commands));
+}
+
+// Test that it's invalid to submit a destroyed texture in a queue
+// in the case of encode, destroy, submit
+TEST_F(TextureValidationTest, EncodeDestroySubmit) {
+    wgpu::TextureDescriptor descriptor = CreateDefaultTextureDescriptor();
+    wgpu::Texture texture = device.CreateTexture(&descriptor);
+    wgpu::TextureView textureView = texture.CreateView();
+
+    utils::ComboRenderPassDescriptor renderPass({textureView});
+
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+    {
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass);
+        pass.End();
+    }
+    wgpu::CommandBuffer commands = encoder.Finish();
+
+    // Destroy the texture
+    texture.Destroy();
+
+    // Submit should fail due to destroyed texture
+    ASSERT_DEVICE_ERROR(queue.Submit(1, &commands));
+}
+
+// Test it is an error to create a RenderAttachment texture with a non-renderable format.
+TEST_F(TextureValidationTest, NonRenderableAndRenderAttachment) {
+    wgpu::TextureDescriptor descriptor;
+    descriptor.size = {1, 1, 1};
+    descriptor.usage = wgpu::TextureUsage::RenderAttachment;
+
+    // Succeeds because RGBA8Unorm is renderable
+    descriptor.format = wgpu::TextureFormat::RGBA8Unorm;
+    device.CreateTexture(&descriptor);
+
+    for (wgpu::TextureFormat format : kNonRenderableColorFormats) {
+        // Fails because `format` is non-renderable
+        descriptor.format = format;
+        ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
+    }
+}
+
+// Test it is an error to create a Storage texture with any format that doesn't support
+// TextureUsage::StorageBinding texture usages.
+TEST_F(TextureValidationTest, TextureFormatNotSupportTextureUsageStorage) {
+    wgpu::TextureDescriptor descriptor;
+    descriptor.size = {1, 1, 1};
+    descriptor.usage = wgpu::TextureUsage::StorageBinding;
+
+    for (wgpu::TextureFormat format : utils::kAllTextureFormats) {
+        descriptor.format = format;
+        if (utils::TextureFormatSupportsStorageTexture(format)) {
+            device.CreateTexture(&descriptor);
+        } else {
+            ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
+        }
+    }
+}
+
+// Test it is an error to create a RenderAttachment texture with a texture dimension that
+// doesn't support the TextureUsage::RenderAttachment texture usage.
+TEST_F(TextureValidationTest, TextureDimensionNotSupportRenderAttachment) {
+    wgpu::TextureDescriptor descriptor;
+    descriptor.size = {1, 1, 1};
+    descriptor.format = wgpu::TextureFormat::RGBA8Unorm;
+    descriptor.usage = wgpu::TextureUsage::RenderAttachment;
+
+    constexpr std::array<wgpu::TextureDimension, 3> kTextureDimensions = {
+        {wgpu::TextureDimension::e1D, wgpu::TextureDimension::e2D, wgpu::TextureDimension::e3D}};
+    for (wgpu::TextureDimension dimension : kTextureDimensions) {
+        descriptor.dimension = dimension;
+        if (dimension == wgpu::TextureDimension::e2D) {
+            device.CreateTexture(&descriptor);
+        } else {
+            ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
+        }
+    }
+}
+
+// Test it is an error to create a texture with format "Undefined".
+TEST_F(TextureValidationTest, TextureFormatUndefined) {
+    wgpu::TextureDescriptor descriptor = CreateDefaultTextureDescriptor();
+    descriptor.format = wgpu::TextureFormat::Undefined;
+    ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
+}
+
+// Test that the creation of a texture with depth24unorm-stencil8 will fail when the feature
+// Depth24UnormStencil8 is not enabled.
+TEST_F(TextureValidationTest, UseD24S8FormatWithoutEnablingFeature) {
+    wgpu::TextureDescriptor descriptor = CreateDefaultTextureDescriptor();
+    descriptor.format = wgpu::TextureFormat::Depth24UnormStencil8;
+    ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
+}
+
+// Test that the creation of a texture with depth32float-stencil8 will fail when the feature
+// Depth32FloatStencil8 is not enabled.
+TEST_F(TextureValidationTest, UseD32S8FormatWithoutEnablingFeature) {
+    wgpu::TextureDescriptor descriptor = CreateDefaultTextureDescriptor();
+    descriptor.format = wgpu::TextureFormat::Depth32FloatStencil8;
+    ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
+}
+
+// Test that the creation of a texture with BC format will fail when the feature
+// textureCompressionBC is not enabled.
+TEST_F(TextureValidationTest, UseBCFormatWithoutEnablingFeature) {
+    for (wgpu::TextureFormat format : utils::kBCFormats) {
         wgpu::TextureDescriptor descriptor = CreateDefaultTextureDescriptor();
+        descriptor.format = format;
+        ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
+    }
+}
+
+// Test that the creation of a texture with ETC2 format will fail when the feature
+// textureCompressionETC2 is not enabled.
+TEST_F(TextureValidationTest, UseETC2FormatWithoutEnablingFeature) {
+    for (wgpu::TextureFormat format : utils::kETC2Formats) {
+        wgpu::TextureDescriptor descriptor = CreateDefaultTextureDescriptor();
+        descriptor.format = format;
+        ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
+    }
+}
+
+// Test that the creation of a texture with ASTC format will fail when the feature
+// textureCompressionASTC is not enabled.
+TEST_F(TextureValidationTest, UseASTCFormatWithoutEnablingFeature) {
+    for (wgpu::TextureFormat format : utils::kASTCFormats) {
+        wgpu::TextureDescriptor descriptor = CreateDefaultTextureDescriptor();
+        descriptor.format = format;
+        ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
+    }
+}
+
+class D24S8TextureFormatsValidationTests : public TextureValidationTest {
+  protected:
+    WGPUDevice CreateTestDevice() override {
+        wgpu::DeviceDescriptor descriptor;
+        wgpu::FeatureName requiredFeatures[1] = {wgpu::FeatureName::Depth24UnormStencil8};
+        descriptor.requiredFeatures = requiredFeatures;
+        descriptor.requiredFeaturesCount = 1;
+        return adapter.CreateDevice(&descriptor);
+    }
+};
+
+// Test that depth24unorm-stencil8 format is invalid for 3D texture
+TEST_F(D24S8TextureFormatsValidationTests, DepthStencilFormatsFor3D) {
+    wgpu::TextureDescriptor descriptor = CreateDefaultTextureDescriptor();
+
+    for (wgpu::TextureDimension dimension : kDimensions) {
         descriptor.format = wgpu::TextureFormat::Depth24UnormStencil8;
+        descriptor.dimension = dimension;
         ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
     }
+}
 
-    // Test that the creation of a texture with depth32float-stencil8 will fail when the feature
-    // Depth32FloatStencil8 is not enabled.
-    TEST_F(TextureValidationTest, UseD32S8FormatWithoutEnablingFeature) {
-        wgpu::TextureDescriptor descriptor = CreateDefaultTextureDescriptor();
+class D32S8TextureFormatsValidationTests : public TextureValidationTest {
+  protected:
+    WGPUDevice CreateTestDevice() override {
+        wgpu::DeviceDescriptor descriptor;
+        wgpu::FeatureName requiredFeatures[1] = {wgpu::FeatureName::Depth32FloatStencil8};
+        descriptor.requiredFeatures = requiredFeatures;
+        descriptor.requiredFeaturesCount = 1;
+        return adapter.CreateDevice(&descriptor);
+    }
+};
+
+// Test that depth32float-stencil8 format is invalid for 3D texture
+TEST_F(D32S8TextureFormatsValidationTests, DepthStencilFormatsFor3D) {
+    wgpu::TextureDescriptor descriptor = CreateDefaultTextureDescriptor();
+
+    for (wgpu::TextureDimension dimension : kDimensions) {
         descriptor.format = wgpu::TextureFormat::Depth32FloatStencil8;
+        descriptor.dimension = dimension;
         ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
     }
+}
 
-    // Test that the creation of a texture with BC format will fail when the feature
-    // textureCompressionBC is not enabled.
-    TEST_F(TextureValidationTest, UseBCFormatWithoutEnablingFeature) {
-        for (wgpu::TextureFormat format : utils::kBCFormats) {
-            wgpu::TextureDescriptor descriptor = CreateDefaultTextureDescriptor();
-            descriptor.format = format;
-            ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
-        }
+class CompressedTextureFormatsValidationTests : public TextureValidationTest {
+  protected:
+    WGPUDevice CreateTestDevice() override {
+        wgpu::DeviceDescriptor descriptor;
+        wgpu::FeatureName requiredFeatures[3] = {wgpu::FeatureName::TextureCompressionBC,
+                                                 wgpu::FeatureName::TextureCompressionETC2,
+                                                 wgpu::FeatureName::TextureCompressionASTC};
+        descriptor.requiredFeatures = requiredFeatures;
+        descriptor.requiredFeaturesCount = 3;
+
+        // TODO(dawn:814): Remove when 1D texture support is complete.
+        const char* kDisallowUnsafeApis = "disallow_unsafe_apis";
+        wgpu::DawnTogglesDeviceDescriptor togglesDesc;
+        togglesDesc.forceDisabledToggles = &kDisallowUnsafeApis;
+        togglesDesc.forceDisabledTogglesCount = 1;
+
+        descriptor.nextInChain = &togglesDesc;
+
+        return adapter.CreateDevice(&descriptor);
     }
 
-    // Test that the creation of a texture with ETC2 format will fail when the feature
-    // textureCompressionETC2 is not enabled.
-    TEST_F(TextureValidationTest, UseETC2FormatWithoutEnablingFeature) {
-        for (wgpu::TextureFormat format : utils::kETC2Formats) {
-            wgpu::TextureDescriptor descriptor = CreateDefaultTextureDescriptor();
-            descriptor.format = format;
-            ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
-        }
+    wgpu::TextureDescriptor CreateDefaultTextureDescriptor() {
+        wgpu::TextureDescriptor descriptor =
+            TextureValidationTest::CreateDefaultTextureDescriptor();
+        descriptor.usage = wgpu::TextureUsage::CopySrc | wgpu::TextureUsage::CopyDst |
+                           wgpu::TextureUsage::TextureBinding;
+        descriptor.size.width = kWidth;
+        descriptor.size.height = kHeight;
+        return descriptor;
     }
 
-    // Test that the creation of a texture with ASTC format will fail when the feature
-    // textureCompressionASTC is not enabled.
-    TEST_F(TextureValidationTest, UseASTCFormatWithoutEnablingFeature) {
-        for (wgpu::TextureFormat format : utils::kASTCFormats) {
-            wgpu::TextureDescriptor descriptor = CreateDefaultTextureDescriptor();
-            descriptor.format = format;
-            ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
-        }
-    }
+  private:
+    // Choose the LCM of all current compressed texture format texel dimensions as the
+    // dimensions of the default texture.
+    static constexpr uint32_t kWidth = 120;
+    static constexpr uint32_t kHeight = 120;
+};
 
-    class D24S8TextureFormatsValidationTests : public TextureValidationTest {
-      protected:
-        WGPUDevice CreateTestDevice() override {
-            wgpu::DeviceDescriptor descriptor;
-            wgpu::FeatureName requiredFeatures[1] = {wgpu::FeatureName::Depth24UnormStencil8};
-            descriptor.requiredFeatures = requiredFeatures;
-            descriptor.requiredFeaturesCount = 1;
-            return adapter.CreateDevice(&descriptor);
-        }
+// Test that only CopySrc, CopyDst and Sampled are accepted as usage in compressed formats.
+TEST_F(CompressedTextureFormatsValidationTests, TextureUsage) {
+    wgpu::TextureUsage invalidUsages[] = {
+        wgpu::TextureUsage::RenderAttachment,
+        wgpu::TextureUsage::StorageBinding,
+        wgpu::TextureUsage::Present,
     };
-
-    // Test that depth24unorm-stencil8 format is invalid for 3D texture
-    TEST_F(D24S8TextureFormatsValidationTests, DepthStencilFormatsFor3D) {
-        wgpu::TextureDescriptor descriptor = CreateDefaultTextureDescriptor();
-
-        for (wgpu::TextureDimension dimension : kDimensions) {
-            descriptor.format = wgpu::TextureFormat::Depth24UnormStencil8;
-            descriptor.dimension = dimension;
-            ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
-        }
-    }
-
-    class D32S8TextureFormatsValidationTests : public TextureValidationTest {
-      protected:
-        WGPUDevice CreateTestDevice() override {
-            wgpu::DeviceDescriptor descriptor;
-            wgpu::FeatureName requiredFeatures[1] = {wgpu::FeatureName::Depth32FloatStencil8};
-            descriptor.requiredFeatures = requiredFeatures;
-            descriptor.requiredFeaturesCount = 1;
-            return adapter.CreateDevice(&descriptor);
-        }
-    };
-
-    // Test that depth32float-stencil8 format is invalid for 3D texture
-    TEST_F(D32S8TextureFormatsValidationTests, DepthStencilFormatsFor3D) {
-        wgpu::TextureDescriptor descriptor = CreateDefaultTextureDescriptor();
-
-        for (wgpu::TextureDimension dimension : kDimensions) {
-            descriptor.format = wgpu::TextureFormat::Depth32FloatStencil8;
-            descriptor.dimension = dimension;
-            ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
-        }
-    }
-
-    class CompressedTextureFormatsValidationTests : public TextureValidationTest {
-      protected:
-        WGPUDevice CreateTestDevice() override {
-            wgpu::DeviceDescriptor descriptor;
-            wgpu::FeatureName requiredFeatures[3] = {wgpu::FeatureName::TextureCompressionBC,
-                                                     wgpu::FeatureName::TextureCompressionETC2,
-                                                     wgpu::FeatureName::TextureCompressionASTC};
-            descriptor.requiredFeatures = requiredFeatures;
-            descriptor.requiredFeaturesCount = 3;
-
-            // TODO(dawn:814): Remove when 1D texture support is complete.
-            const char* kDisallowUnsafeApis = "disallow_unsafe_apis";
-            wgpu::DawnTogglesDeviceDescriptor togglesDesc;
-            togglesDesc.forceDisabledToggles = &kDisallowUnsafeApis;
-            togglesDesc.forceDisabledTogglesCount = 1;
-
-            descriptor.nextInChain = &togglesDesc;
-
-            return adapter.CreateDevice(&descriptor);
-        }
-
-        wgpu::TextureDescriptor CreateDefaultTextureDescriptor() {
-            wgpu::TextureDescriptor descriptor =
-                TextureValidationTest::CreateDefaultTextureDescriptor();
-            descriptor.usage = wgpu::TextureUsage::CopySrc | wgpu::TextureUsage::CopyDst |
-                               wgpu::TextureUsage::TextureBinding;
-            descriptor.size.width = kWidth;
-            descriptor.size.height = kHeight;
-            return descriptor;
-        }
-
-      private:
-        // Choose the LCM of all current compressed texture format texel dimensions as the
-        // dimensions of the default texture.
-        static constexpr uint32_t kWidth = 120;
-        static constexpr uint32_t kHeight = 120;
-    };
-
-    // Test that only CopySrc, CopyDst and Sampled are accepted as usage in compressed formats.
-    TEST_F(CompressedTextureFormatsValidationTests, TextureUsage) {
-        wgpu::TextureUsage invalidUsages[] = {
-            wgpu::TextureUsage::RenderAttachment,
-            wgpu::TextureUsage::StorageBinding,
-            wgpu::TextureUsage::Present,
-        };
-        for (wgpu::TextureFormat format : utils::kCompressedFormats) {
-            for (wgpu::TextureUsage usage : invalidUsages) {
-                wgpu::TextureDescriptor descriptor = CreateDefaultTextureDescriptor();
-                descriptor.format = format;
-                descriptor.usage = usage;
-                ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
-            }
-        }
-    }
-
-    // Test that using various MipLevelCount is allowed for compressed formats.
-    TEST_F(CompressedTextureFormatsValidationTests, MipLevelCount) {
-        for (wgpu::TextureFormat format : utils::kCompressedFormats) {
-            for (uint32_t mipLevels : {1, 3, 6}) {
-                wgpu::TextureDescriptor descriptor = CreateDefaultTextureDescriptor();
-                descriptor.format = format;
-                descriptor.mipLevelCount = mipLevels;
-                device.CreateTexture(&descriptor);
-            }
-        }
-    }
-
-    // Test that it is invalid to specify SampleCount>1 in compressed formats.
-    TEST_F(CompressedTextureFormatsValidationTests, SampleCount) {
-        for (wgpu::TextureFormat format : utils::kCompressedFormats) {
+    for (wgpu::TextureFormat format : utils::kCompressedFormats) {
+        for (wgpu::TextureUsage usage : invalidUsages) {
             wgpu::TextureDescriptor descriptor = CreateDefaultTextureDescriptor();
             descriptor.format = format;
-            descriptor.sampleCount = 4;
+            descriptor.usage = usage;
             ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
         }
     }
+}
 
-    // Test that it is allowed to create a 2D texture with depth>1 in compressed formats.
-    TEST_F(CompressedTextureFormatsValidationTests, 2DArrayTexture) {
-        for (wgpu::TextureFormat format : utils::kCompressedFormats) {
+// Test that using various MipLevelCount is allowed for compressed formats.
+TEST_F(CompressedTextureFormatsValidationTests, MipLevelCount) {
+    for (wgpu::TextureFormat format : utils::kCompressedFormats) {
+        for (uint32_t mipLevels : {1, 3, 6}) {
             wgpu::TextureDescriptor descriptor = CreateDefaultTextureDescriptor();
             descriptor.format = format;
-            descriptor.size.depthOrArrayLayers = 6;
+            descriptor.mipLevelCount = mipLevels;
             device.CreateTexture(&descriptor);
         }
     }
+}
 
-    // Test that it is not allowed to create a 1D texture in compressed formats.
-    TEST_F(CompressedTextureFormatsValidationTests, 1DTexture) {
-        for (wgpu::TextureFormat format : utils::kCompressedFormats) {
-            wgpu::TextureDescriptor descriptor = CreateDefaultTextureDescriptor();
-            descriptor.format = format;
-            // Unfortunately we can't use the block height here otherwise validation for the max
-            // texture 1D size will trigger. We check the error message below to make sure the
-            // correct code path is covered.
-            descriptor.size.height = 1;
-            descriptor.size.depthOrArrayLayers = 1;
-            descriptor.dimension = wgpu::TextureDimension::e1D;
-            ASSERT_DEVICE_ERROR(
-                device.CreateTexture(&descriptor),
-                testing::HasSubstr(
-                    "The dimension (TextureDimension::e1D) of a texture with a compressed format"));
-        }
+// Test that it is invalid to specify SampleCount>1 in compressed formats.
+TEST_F(CompressedTextureFormatsValidationTests, SampleCount) {
+    for (wgpu::TextureFormat format : utils::kCompressedFormats) {
+        wgpu::TextureDescriptor descriptor = CreateDefaultTextureDescriptor();
+        descriptor.format = format;
+        descriptor.sampleCount = 4;
+        ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
     }
+}
 
-    // Test that it is not allowed to create a 3D texture in compressed formats.
-    TEST_F(CompressedTextureFormatsValidationTests, 3DTexture) {
-        for (wgpu::TextureFormat format : utils::kCompressedFormats) {
+// Test that it is allowed to create a 2D texture with depth>1 in compressed formats.
+TEST_F(CompressedTextureFormatsValidationTests, 2DArrayTexture) {
+    for (wgpu::TextureFormat format : utils::kCompressedFormats) {
+        wgpu::TextureDescriptor descriptor = CreateDefaultTextureDescriptor();
+        descriptor.format = format;
+        descriptor.size.depthOrArrayLayers = 6;
+        device.CreateTexture(&descriptor);
+    }
+}
+
+// Test that it is not allowed to create a 1D texture in compressed formats.
+TEST_F(CompressedTextureFormatsValidationTests, 1DTexture) {
+    for (wgpu::TextureFormat format : utils::kCompressedFormats) {
+        wgpu::TextureDescriptor descriptor = CreateDefaultTextureDescriptor();
+        descriptor.format = format;
+        // Unfortunately we can't use the block height here otherwise validation for the max
+        // texture 1D size will trigger. We check the error message below to make sure the
+        // correct code path is covered.
+        descriptor.size.height = 1;
+        descriptor.size.depthOrArrayLayers = 1;
+        descriptor.dimension = wgpu::TextureDimension::e1D;
+        ASSERT_DEVICE_ERROR(
+            device.CreateTexture(&descriptor),
+            testing::HasSubstr(
+                "The dimension (TextureDimension::e1D) of a texture with a compressed format"));
+    }
+}
+
+// Test that it is not allowed to create a 3D texture in compressed formats.
+TEST_F(CompressedTextureFormatsValidationTests, 3DTexture) {
+    for (wgpu::TextureFormat format : utils::kCompressedFormats) {
+        wgpu::TextureDescriptor descriptor = CreateDefaultTextureDescriptor();
+        descriptor.format = format;
+        descriptor.size.depthOrArrayLayers = 4;
+        descriptor.dimension = wgpu::TextureDimension::e3D;
+        ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
+    }
+}
+
+// Test that it is invalid to use numbers for a texture's width/height that are not multiples
+// of the compressed block sizes.
+TEST_F(CompressedTextureFormatsValidationTests, TextureSize) {
+    for (wgpu::TextureFormat format : utils::kCompressedFormats) {
+        uint32_t blockWidth = utils::GetTextureFormatBlockWidth(format);
+        uint32_t blockHeight = utils::GetTextureFormatBlockHeight(format);
+
+        // Test that the default size (120 x 120) is valid for all formats.
+        {
             wgpu::TextureDescriptor descriptor = CreateDefaultTextureDescriptor();
             descriptor.format = format;
-            descriptor.size.depthOrArrayLayers = 4;
-            descriptor.dimension = wgpu::TextureDimension::e3D;
+            ASSERT_TRUE(descriptor.size.width % blockWidth == 0 &&
+                        descriptor.size.height % blockHeight == 0);
+            device.CreateTexture(&descriptor);
+        }
+
+        // Test that invalid width should cause an error. Note that if the block width of the
+        // compression type is even, we test that alignment to half the width is not sufficient.
+        {
+            wgpu::TextureDescriptor descriptor = CreateDefaultTextureDescriptor();
+            descriptor.format = format;
+            descriptor.size.width =
+                blockWidth % 2 == 0 ? blockWidth - (blockWidth / 2) : blockWidth - 1;
             ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
         }
-    }
 
-    // Test that it is invalid to use numbers for a texture's width/height that are not multiples
-    // of the compressed block sizes.
-    TEST_F(CompressedTextureFormatsValidationTests, TextureSize) {
-        for (wgpu::TextureFormat format : utils::kCompressedFormats) {
-            uint32_t blockWidth = utils::GetTextureFormatBlockWidth(format);
-            uint32_t blockHeight = utils::GetTextureFormatBlockHeight(format);
+        // Test that invalid height should cause an error. Note that if the block height of the
+        // compression type is even, we test that alignment to half the height is not
+        // sufficient.
+        {
+            wgpu::TextureDescriptor descriptor = CreateDefaultTextureDescriptor();
+            descriptor.format = format;
+            descriptor.size.height =
+                blockHeight % 2 == 0 ? blockHeight - (blockHeight / 2) : blockHeight - 1;
+            ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
+        }
 
-            // Test that the default size (120 x 120) is valid for all formats.
-            {
-                wgpu::TextureDescriptor descriptor = CreateDefaultTextureDescriptor();
-                descriptor.format = format;
-                ASSERT_TRUE(descriptor.size.width % blockWidth == 0 &&
-                            descriptor.size.height % blockHeight == 0);
-                device.CreateTexture(&descriptor);
-            }
-
-            // Test that invalid width should cause an error. Note that if the block width of the
-            // compression type is even, we test that alignment to half the width is not sufficient.
-            {
-                wgpu::TextureDescriptor descriptor = CreateDefaultTextureDescriptor();
-                descriptor.format = format;
-                descriptor.size.width =
-                    blockWidth % 2 == 0 ? blockWidth - (blockWidth / 2) : blockWidth - 1;
-                ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
-            }
-
-            // Test that invalid width should cause an error. Note that if the block height of the
-            // compression type is even, we test that alignment to half the height is not
-            // sufficient.
-            {
-                wgpu::TextureDescriptor descriptor = CreateDefaultTextureDescriptor();
-                descriptor.format = format;
-                descriptor.size.height =
-                    blockHeight % 2 == 0 ? blockHeight - (blockHeight / 2) : blockHeight - 1;
-                ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
-            }
-
-            // Test a working dimension based on some constant multipliers to the dimensions.
-            {
-                constexpr uint32_t kWidthMultiplier = 3;
-                constexpr uint32_t kHeightMultiplier = 8;
-                wgpu::TextureDescriptor descriptor = CreateDefaultTextureDescriptor();
-                descriptor.format = format;
-                descriptor.size.width = kWidthMultiplier * blockWidth;
-                descriptor.size.height = kHeightMultiplier * blockHeight;
-                device.CreateTexture(&descriptor);
-            }
+        // Test a working dimension based on some constant multipliers to the dimensions.
+        {
+            constexpr uint32_t kWidthMultiplier = 3;
+            constexpr uint32_t kHeightMultiplier = 8;
+            wgpu::TextureDescriptor descriptor = CreateDefaultTextureDescriptor();
+            descriptor.format = format;
+            descriptor.size.width = kWidthMultiplier * blockWidth;
+            descriptor.size.height = kHeightMultiplier * blockHeight;
+            device.CreateTexture(&descriptor);
         }
     }
+}
 
 }  // namespace
diff --git a/src/dawn/tests/unittests/validation/TextureViewValidationTests.cpp b/src/dawn/tests/unittests/validation/TextureViewValidationTests.cpp
index 51d1c06..d56647b 100644
--- a/src/dawn/tests/unittests/validation/TextureViewValidationTests.cpp
+++ b/src/dawn/tests/unittests/validation/TextureViewValidationTests.cpp
@@ -18,999 +18,996 @@
 
 namespace {
 
-    class TextureViewValidationTest : public ValidationTest {};
+class TextureViewValidationTest : public ValidationTest {};
 
-    constexpr uint32_t kWidth = 32u;
-    constexpr uint32_t kHeight = 32u;
-    constexpr uint32_t kDepth = 6u;
-    constexpr uint32_t kDefaultMipLevels = 6u;
+constexpr uint32_t kWidth = 32u;
+constexpr uint32_t kHeight = 32u;
+constexpr uint32_t kDepth = 6u;
+constexpr uint32_t kDefaultMipLevels = 6u;
 
-    constexpr wgpu::TextureFormat kDefaultTextureFormat = wgpu::TextureFormat::RGBA8Unorm;
+constexpr wgpu::TextureFormat kDefaultTextureFormat = wgpu::TextureFormat::RGBA8Unorm;
 
-    wgpu::Texture Create2DArrayTexture(wgpu::Device& device,
-                                       uint32_t arrayLayerCount,
-                                       uint32_t width = kWidth,
-                                       uint32_t height = kHeight,
-                                       uint32_t mipLevelCount = kDefaultMipLevels,
-                                       uint32_t sampleCount = 1) {
-        wgpu::TextureDescriptor descriptor;
-        descriptor.dimension = wgpu::TextureDimension::e2D;
-        descriptor.size.width = width;
-        descriptor.size.height = height;
-        descriptor.size.depthOrArrayLayers = arrayLayerCount;
-        descriptor.sampleCount = sampleCount;
-        descriptor.format = kDefaultTextureFormat;
-        descriptor.mipLevelCount = mipLevelCount;
-        descriptor.usage = wgpu::TextureUsage::TextureBinding;
-        return device.CreateTexture(&descriptor);
-    }
+wgpu::Texture Create2DArrayTexture(wgpu::Device& device,
+                                   uint32_t arrayLayerCount,
+                                   uint32_t width = kWidth,
+                                   uint32_t height = kHeight,
+                                   uint32_t mipLevelCount = kDefaultMipLevels,
+                                   uint32_t sampleCount = 1) {
+    wgpu::TextureDescriptor descriptor;
+    descriptor.dimension = wgpu::TextureDimension::e2D;
+    descriptor.size.width = width;
+    descriptor.size.height = height;
+    descriptor.size.depthOrArrayLayers = arrayLayerCount;
+    descriptor.sampleCount = sampleCount;
+    descriptor.format = kDefaultTextureFormat;
+    descriptor.mipLevelCount = mipLevelCount;
+    descriptor.usage = wgpu::TextureUsage::TextureBinding;
+    return device.CreateTexture(&descriptor);
+}
 
-    wgpu::Texture Create3DTexture(wgpu::Device& device) {
-        wgpu::TextureDescriptor descriptor;
-        descriptor.dimension = wgpu::TextureDimension::e3D;
-        descriptor.size = {kWidth, kHeight, kDepth};
-        descriptor.sampleCount = 1;
-        descriptor.format = kDefaultTextureFormat;
+wgpu::Texture Create3DTexture(wgpu::Device& device) {
+    wgpu::TextureDescriptor descriptor;
+    descriptor.dimension = wgpu::TextureDimension::e3D;
+    descriptor.size = {kWidth, kHeight, kDepth};
+    descriptor.sampleCount = 1;
+    descriptor.format = kDefaultTextureFormat;
+    descriptor.mipLevelCount = kDefaultMipLevels;
+    descriptor.usage = wgpu::TextureUsage::TextureBinding;
+    return device.CreateTexture(&descriptor);
+}
+
+wgpu::Texture Create1DTexture(wgpu::Device& device) {
+    wgpu::TextureDescriptor descriptor;
+    descriptor.dimension = wgpu::TextureDimension::e1D;
+    descriptor.size = {kWidth, 1, 1};
+    descriptor.format = kDefaultTextureFormat;
+    descriptor.usage = wgpu::TextureUsage::TextureBinding;
+    return device.CreateTexture(&descriptor);
+}
+
+wgpu::Texture CreateDepthStencilTexture(wgpu::Device& device, wgpu::TextureFormat format) {
+    wgpu::TextureDescriptor descriptor = {};
+    descriptor.size = {kWidth, kHeight, kDepth};
+    descriptor.usage = wgpu::TextureUsage::TextureBinding | wgpu::TextureUsage::RenderAttachment;
+    descriptor.mipLevelCount = kDefaultMipLevels;
+    descriptor.format = format;
+    return device.CreateTexture(&descriptor);
+}
+
+wgpu::TextureViewDescriptor CreateDefaultViewDescriptor(wgpu::TextureViewDimension dimension) {
+    wgpu::TextureViewDescriptor descriptor;
+    descriptor.format = kDefaultTextureFormat;
+    descriptor.dimension = dimension;
+    descriptor.baseMipLevel = 0;
+    if (dimension != wgpu::TextureViewDimension::e1D) {
         descriptor.mipLevelCount = kDefaultMipLevels;
-        descriptor.usage = wgpu::TextureUsage::TextureBinding;
-        return device.CreateTexture(&descriptor);
+    }
+    descriptor.baseArrayLayer = 0;
+    descriptor.arrayLayerCount = 1;
+    return descriptor;
+}
+
+// Test creating texture view on a 2D non-array texture
+TEST_F(TextureViewValidationTest, CreateTextureViewOnTexture2D) {
+    wgpu::Texture texture = Create2DArrayTexture(device, 1);
+
+    wgpu::TextureViewDescriptor base2DTextureViewDescriptor =
+        CreateDefaultViewDescriptor(wgpu::TextureViewDimension::e2D);
+
+    // It is an error to create a view with zero 'arrayLayerCount'.
+    {
+        wgpu::TextureViewDescriptor descriptor = base2DTextureViewDescriptor;
+        descriptor.arrayLayerCount = 0;
+        ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
     }
 
-    wgpu::Texture Create1DTexture(wgpu::Device& device) {
-        wgpu::TextureDescriptor descriptor;
-        descriptor.dimension = wgpu::TextureDimension::e1D;
-        descriptor.size = {kWidth, 1, 1};
-        descriptor.format = kDefaultTextureFormat;
-        descriptor.usage = wgpu::TextureUsage::TextureBinding;
-        return device.CreateTexture(&descriptor);
+    // It is an error to create a view with zero 'mipLevelCount'.
+    {
+        wgpu::TextureViewDescriptor descriptor = base2DTextureViewDescriptor;
+        descriptor.mipLevelCount = 0;
+        ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
     }
 
-    wgpu::Texture CreateDepthStencilTexture(wgpu::Device& device, wgpu::TextureFormat format) {
-        wgpu::TextureDescriptor descriptor = {};
-        descriptor.size = {kWidth, kHeight, kDepth};
-        descriptor.usage =
-            wgpu::TextureUsage::TextureBinding | wgpu::TextureUsage::RenderAttachment;
-        descriptor.mipLevelCount = kDefaultMipLevels;
-        descriptor.format = format;
-        return device.CreateTexture(&descriptor);
-    }
-
-    wgpu::TextureViewDescriptor CreateDefaultViewDescriptor(wgpu::TextureViewDimension dimension) {
-        wgpu::TextureViewDescriptor descriptor;
-        descriptor.format = kDefaultTextureFormat;
-        descriptor.dimension = dimension;
-        descriptor.baseMipLevel = 0;
-        if (dimension != wgpu::TextureViewDimension::e1D) {
-            descriptor.mipLevelCount = kDefaultMipLevels;
-        }
-        descriptor.baseArrayLayer = 0;
+    // It is OK to create a 2D texture view on a 2D texture.
+    {
+        wgpu::TextureViewDescriptor descriptor = base2DTextureViewDescriptor;
         descriptor.arrayLayerCount = 1;
-        return descriptor;
-    }
-
-    // Test creating texture view on a 2D non-array texture
-    TEST_F(TextureViewValidationTest, CreateTextureViewOnTexture2D) {
-        wgpu::Texture texture = Create2DArrayTexture(device, 1);
-
-        wgpu::TextureViewDescriptor base2DTextureViewDescriptor =
-            CreateDefaultViewDescriptor(wgpu::TextureViewDimension::e2D);
-
-        // It is an error to create a view with zero 'arrayLayerCount'.
-        {
-            wgpu::TextureViewDescriptor descriptor = base2DTextureViewDescriptor;
-            descriptor.arrayLayerCount = 0;
-            ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
-        }
-
-        // It is an error to create a view with zero 'mipLevelCount'.
-        {
-            wgpu::TextureViewDescriptor descriptor = base2DTextureViewDescriptor;
-            descriptor.mipLevelCount = 0;
-            ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
-        }
-
-        // It is OK to create a 2D texture view on a 2D texture.
-        {
-            wgpu::TextureViewDescriptor descriptor = base2DTextureViewDescriptor;
-            descriptor.arrayLayerCount = 1;
-            texture.CreateView(&descriptor);
-        }
-
-        // It is an error to view a layer past the end of the texture.
-        {
-            wgpu::TextureViewDescriptor descriptor = base2DTextureViewDescriptor;
-            descriptor.arrayLayerCount = 2;
-            ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
-        }
-
-        // It is OK to create a 1-layer 2D array texture view on a 2D texture.
-        {
-            wgpu::TextureViewDescriptor descriptor = base2DTextureViewDescriptor;
-            descriptor.dimension = wgpu::TextureViewDimension::e2DArray;
-            descriptor.arrayLayerCount = 1;
-            texture.CreateView(&descriptor);
-        }
-
-        // It is an error to create a 3D texture view on a 2D texture.
-        {
-            wgpu::TextureViewDescriptor descriptor = base2DTextureViewDescriptor;
-            descriptor.dimension = wgpu::TextureViewDimension::e3D;
-            descriptor.arrayLayerCount = 1;
-            ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
-        }
-
-        // baseMipLevel == k && mipLevelCount == WGPU_MIP_LEVEL_COUNT_UNDEFINED means to use levels
-        // k..end.
-        {
-            wgpu::TextureViewDescriptor descriptor = base2DTextureViewDescriptor;
-            descriptor.mipLevelCount = WGPU_MIP_LEVEL_COUNT_UNDEFINED;
-
-            descriptor.baseMipLevel = 0;
-            texture.CreateView(&descriptor);
-            descriptor.baseMipLevel = 1;
-            texture.CreateView(&descriptor);
-            descriptor.baseMipLevel = kDefaultMipLevels - 1;
-            texture.CreateView(&descriptor);
-            descriptor.baseMipLevel = kDefaultMipLevels;
-            ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
-        }
-
-        // It is an error to make the mip level out of range.
-        {
-            wgpu::TextureViewDescriptor descriptor = base2DTextureViewDescriptor;
-            descriptor.baseMipLevel = 0;
-            descriptor.mipLevelCount = kDefaultMipLevels + 1;
-            ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
-            descriptor.baseMipLevel = 1;
-            descriptor.mipLevelCount = kDefaultMipLevels;
-            ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
-            descriptor.baseMipLevel = kDefaultMipLevels - 1;
-            descriptor.mipLevelCount = 2;
-            ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
-            descriptor.baseMipLevel = kDefaultMipLevels;
-            descriptor.mipLevelCount = 1;
-            ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
-        }
-    }
-
-    // Test creating texture view on a 2D array texture
-    TEST_F(TextureViewValidationTest, CreateTextureViewOnTexture2DArray) {
-        constexpr uint32_t kDefaultArrayLayers = 6;
-
-        wgpu::Texture texture = Create2DArrayTexture(device, kDefaultArrayLayers);
-
-        wgpu::TextureViewDescriptor base2DArrayTextureViewDescriptor =
-            CreateDefaultViewDescriptor(wgpu::TextureViewDimension::e2DArray);
-
-        // It is an error to create a view with zero 'arrayLayerCount'.
-        {
-            wgpu::TextureViewDescriptor descriptor = base2DArrayTextureViewDescriptor;
-            descriptor.dimension = wgpu::TextureViewDimension::e2D;
-            descriptor.arrayLayerCount = 0;
-            ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
-        }
-
-        // It is an error to create a view with zero 'mipLevelCount'.
-        {
-            wgpu::TextureViewDescriptor descriptor = base2DArrayTextureViewDescriptor;
-            descriptor.dimension = wgpu::TextureViewDimension::e2D;
-            descriptor.mipLevelCount = 0;
-            ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
-        }
-
-        // It is OK to create a 2D texture view on a 2D array texture.
-        {
-            wgpu::TextureViewDescriptor descriptor = base2DArrayTextureViewDescriptor;
-            descriptor.dimension = wgpu::TextureViewDimension::e2D;
-            descriptor.arrayLayerCount = 1;
-            texture.CreateView(&descriptor);
-        }
-
-        // It is OK to create a 2D array texture view on a 2D array texture.
-        {
-            wgpu::TextureViewDescriptor descriptor = base2DArrayTextureViewDescriptor;
-            descriptor.arrayLayerCount = kDefaultArrayLayers;
-            texture.CreateView(&descriptor);
-        }
-
-        // It is an error to create a 3D texture view on a 2D array texture.
-        {
-            wgpu::TextureViewDescriptor descriptor = base2DArrayTextureViewDescriptor;
-            descriptor.dimension = wgpu::TextureViewDimension::e3D;
-            descriptor.arrayLayerCount = 1;
-            ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
-        }
-
-        // It is an error to create a 1D texture view on a 2D array texture.
-        {
-            wgpu::TextureViewDescriptor descriptor = base2DArrayTextureViewDescriptor;
-            descriptor.dimension = wgpu::TextureViewDimension::e1D;
-            descriptor.arrayLayerCount = 1;
-            ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
-        }
-
-        // baseArrayLayer == k && arrayLayerCount == wgpu::kArrayLayerCountUndefined means to use
-        // layers k..end.
-        {
-            wgpu::TextureViewDescriptor descriptor = base2DArrayTextureViewDescriptor;
-            descriptor.arrayLayerCount = wgpu::kArrayLayerCountUndefined;
-
-            descriptor.baseArrayLayer = 0;
-            texture.CreateView(&descriptor);
-            descriptor.baseArrayLayer = 1;
-            texture.CreateView(&descriptor);
-            descriptor.baseArrayLayer = kDefaultArrayLayers - 1;
-            texture.CreateView(&descriptor);
-            descriptor.baseArrayLayer = kDefaultArrayLayers;
-            ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
-        }
-
-        // It is an error for the array layer range of the view to exceed that of the texture.
-        {
-            wgpu::TextureViewDescriptor descriptor = base2DArrayTextureViewDescriptor;
-            descriptor.baseArrayLayer = 0;
-            descriptor.arrayLayerCount = kDefaultArrayLayers + 1;
-            ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
-            descriptor.baseArrayLayer = 1;
-            descriptor.arrayLayerCount = kDefaultArrayLayers;
-            ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
-            descriptor.baseArrayLayer = kDefaultArrayLayers - 1;
-            descriptor.arrayLayerCount = 2;
-            ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
-            descriptor.baseArrayLayer = kDefaultArrayLayers;
-            descriptor.arrayLayerCount = 1;
-            ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
-        }
-    }
-
-    // Test creating texture view on a 3D texture
-    TEST_F(TextureViewValidationTest, CreateTextureViewOnTexture3D) {
-        wgpu::Texture texture = Create3DTexture(device);
-
-        wgpu::TextureViewDescriptor base3DTextureViewDescriptor =
-            CreateDefaultViewDescriptor(wgpu::TextureViewDimension::e3D);
-
-        // It is an error to create a view with zero 'arrayLayerCount'.
-        {
-            wgpu::TextureViewDescriptor descriptor = base3DTextureViewDescriptor;
-            descriptor.arrayLayerCount = 0;
-            ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
-        }
-
-        // It is an error to create a view with zero 'mipLevelCount'.
-        {
-            wgpu::TextureViewDescriptor descriptor = base3DTextureViewDescriptor;
-            descriptor.mipLevelCount = 0;
-            ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
-        }
-
-        // It is OK to create a 3D texture view on a 3D texture.
-        {
-            wgpu::TextureViewDescriptor descriptor = base3DTextureViewDescriptor;
-            texture.CreateView(&descriptor);
-        }
-
-        // It is an error to create a 1D/2D/2DArray/Cube/CubeArray texture view on a 3D texture.
-        {
-            wgpu::TextureViewDimension invalidDimensions[] = {
-                wgpu::TextureViewDimension::e1D,       wgpu::TextureViewDimension::e2D,
-                wgpu::TextureViewDimension::e2DArray,  wgpu::TextureViewDimension::Cube,
-                wgpu::TextureViewDimension::CubeArray,
-            };
-            for (wgpu::TextureViewDimension dimension : invalidDimensions) {
-                wgpu::TextureViewDescriptor descriptor = base3DTextureViewDescriptor;
-                descriptor.dimension = dimension;
-                if (dimension == wgpu::TextureViewDimension::Cube ||
-                    dimension == wgpu::TextureViewDimension::CubeArray) {
-                    descriptor.arrayLayerCount = 6;
-                }
-                ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
-            }
-        }
-
-        // baseMipLevel == k && mipLevelCount == WGPU_MIP_LEVEL_COUNT_UNDEFINED means to use levels
-        // k..end.
-        {
-            wgpu::TextureViewDescriptor descriptor = base3DTextureViewDescriptor;
-            descriptor.mipLevelCount = WGPU_MIP_LEVEL_COUNT_UNDEFINED;
-
-            descriptor.baseMipLevel = 0;
-            texture.CreateView(&descriptor);
-            descriptor.baseMipLevel = 1;
-            texture.CreateView(&descriptor);
-            descriptor.baseMipLevel = kDefaultMipLevels - 1;
-            texture.CreateView(&descriptor);
-            descriptor.baseMipLevel = kDefaultMipLevels;
-            ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
-        }
-
-        // It is an error to make the mip level out of range.
-        {
-            wgpu::TextureViewDescriptor descriptor = base3DTextureViewDescriptor;
-            descriptor.baseMipLevel = 0;
-            descriptor.mipLevelCount = kDefaultMipLevels + 1;
-            ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
-            descriptor.baseMipLevel = 1;
-            descriptor.mipLevelCount = kDefaultMipLevels;
-            ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
-            descriptor.baseMipLevel = kDefaultMipLevels - 1;
-            descriptor.mipLevelCount = 2;
-            ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
-            descriptor.baseMipLevel = kDefaultMipLevels;
-            descriptor.mipLevelCount = 1;
-            ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
-        }
-
-        // baseArrayLayer == k && arrayLayerCount == wgpu::kArrayLayerCountUndefined means to use
-        // layers k..end. But baseArrayLayer must be 0, and arrayLayerCount must be 1 at most for 3D
-        // texture view.
-        {
-            wgpu::TextureViewDescriptor descriptor = base3DTextureViewDescriptor;
-            descriptor.arrayLayerCount = wgpu::kArrayLayerCountUndefined;
-            descriptor.baseArrayLayer = 0;
-            texture.CreateView(&descriptor);
-            descriptor.baseArrayLayer = 1;
-            ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
-
-            descriptor.baseArrayLayer = 0;
-            descriptor.arrayLayerCount = 1;
-            texture.CreateView(&descriptor);
-            descriptor.arrayLayerCount = 2;
-            ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
-            descriptor.arrayLayerCount = kDepth;
-            ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
-        }
-    }
-
-    // Test creating texture view on a 1D texture
-    TEST_F(TextureViewValidationTest, CreateTextureViewOnTexture1D) {
-        wgpu::Texture texture = Create1DTexture(device);
-
-        wgpu::TextureViewDescriptor base1DTextureViewDescriptor =
-            CreateDefaultViewDescriptor(wgpu::TextureViewDimension::e1D);
-
-        // It is an error to create a view with zero 'arrayLayerCount'.
-        {
-            wgpu::TextureViewDescriptor descriptor = base1DTextureViewDescriptor;
-            descriptor.arrayLayerCount = 0;
-            ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
-        }
-
-        // It is an error to create a view with zero 'mipLevelCount'.
-        {
-            wgpu::TextureViewDescriptor descriptor = base1DTextureViewDescriptor;
-            descriptor.mipLevelCount = 0;
-            ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
-        }
-
-        // It is OK to create a 1D texture view on a 1D texture.
-        {
-            wgpu::TextureViewDescriptor descriptor = base1DTextureViewDescriptor;
-            texture.CreateView(&descriptor);
-        }
-
-        // It is an error to create a 2D/2DArray/Cube/CubeArray/3D texture view on a 1D texture.
-        {
-            wgpu::TextureViewDimension invalidDimensions[] = {
-                wgpu::TextureViewDimension::e2D,  wgpu::TextureViewDimension::e2DArray,
-                wgpu::TextureViewDimension::Cube, wgpu::TextureViewDimension::CubeArray,
-                wgpu::TextureViewDimension::e3D,
-            };
-            for (wgpu::TextureViewDimension dimension : invalidDimensions) {
-                wgpu::TextureViewDescriptor descriptor = base1DTextureViewDescriptor;
-                descriptor.dimension = dimension;
-                ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
-            }
-        }
-
-        // No tests for setting mip levels / array layer ranges because 1D textures can only have
-        // a single mip and layer.
-    }
-
-    // Test creating texture view on a multisampled 2D texture
-    TEST_F(TextureViewValidationTest, CreateTextureViewOnMultisampledTexture2D) {
-        wgpu::Texture texture =
-            Create2DArrayTexture(device, /* arrayLayerCount */ 1, kWidth, kHeight,
-                                 /* mipLevelCount */ 1, /* sampleCount */ 4);
-
-        // It is OK to create a 2D texture view on a multisampled 2D texture.
-        {
-            wgpu::TextureViewDescriptor descriptor = {};
-            texture.CreateView(&descriptor);
-        }
-
-        // It is an error to create a 1-layer 2D array texture view on a multisampled 2D texture.
-        {
-            wgpu::TextureViewDescriptor descriptor = {};
-            descriptor.dimension = wgpu::TextureViewDimension::e2DArray;
-            descriptor.arrayLayerCount = 1;
-            ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
-        }
-
-        // It is an error to create a 1D texture view on a multisampled 2D texture.
-        {
-            wgpu::TextureViewDescriptor descriptor = {};
-            descriptor.dimension = wgpu::TextureViewDimension::e1D;
-            ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
-        }
-
-        // It is an error to create a 3D texture view on a multisampled 2D texture.
-        {
-            wgpu::TextureViewDescriptor descriptor = {};
-            descriptor.dimension = wgpu::TextureViewDimension::e3D;
-            ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
-        }
-    }
-
-    // Using the "none" ("default") values validates the same as explicitly
-    // specifying the values they're supposed to default to.
-    // Variant for a 2D texture with more than 1 array layer.
-    TEST_F(TextureViewValidationTest, TextureViewDescriptorDefaults2DArray) {
-        constexpr uint32_t kDefaultArrayLayers = 8;
-        wgpu::Texture texture = Create2DArrayTexture(device, kDefaultArrayLayers);
-
-        { texture.CreateView(); }
-        {
-            wgpu::TextureViewDescriptor descriptor;
-            descriptor.format = wgpu::TextureFormat::Undefined;
-            texture.CreateView(&descriptor);
-            descriptor.format = wgpu::TextureFormat::RGBA8Unorm;
-            texture.CreateView(&descriptor);
-            descriptor.format = wgpu::TextureFormat::R8Unorm;
-            ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
-        }
-        {
-            wgpu::TextureViewDescriptor descriptor;
-            descriptor.dimension = wgpu::TextureViewDimension::Undefined;
-            texture.CreateView(&descriptor);
-            descriptor.dimension = wgpu::TextureViewDimension::e2DArray;
-            texture.CreateView(&descriptor);
-            // Setting view dimension to 2D, its arrayLayer will default to 1. And view creation
-            // will success.
-            descriptor.dimension = wgpu::TextureViewDimension::e2D;
-            texture.CreateView(&descriptor);
-            // Setting view dimension to Cube, its arrayLayer will default to 6.
-            descriptor.dimension = wgpu::TextureViewDimension::Cube;
-            texture.CreateView(&descriptor);
-            descriptor.baseArrayLayer = 2;
-            texture.CreateView(&descriptor);
-            descriptor.baseArrayLayer = 3;
-            ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
-            // Setting view dimension to CubeArray, its arrayLayer will default to
-            // size.depthOrArrayLayers (kDefaultArrayLayers) - baseArrayLayer.
-            descriptor.dimension = wgpu::TextureViewDimension::CubeArray;
-            descriptor.baseArrayLayer = 0;
-            ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
-            descriptor.baseArrayLayer = 2;
-            texture.CreateView(&descriptor);
-            descriptor.baseArrayLayer = 3;
-            ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
-        }
-        {
-            wgpu::TextureViewDescriptor descriptor;
-
-            // Setting array layers to non-0 means the dimensionality will
-            // default to 2D so by itself it causes an error.
-            descriptor.arrayLayerCount = kDefaultArrayLayers;
-            ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
-            descriptor.dimension = wgpu::TextureViewDimension::e2DArray;
-            texture.CreateView(&descriptor);
-
-            descriptor.mipLevelCount = kDefaultMipLevels;
-            texture.CreateView(&descriptor);
-        }
-    }
-
-    // Using the "none" ("default") values validates the same as explicitly
-    // specifying the values they're supposed to default to.
-    // Variant for a 2D texture with only 1 array layer.
-    TEST_F(TextureViewValidationTest, TextureViewDescriptorDefaults2DNonArray) {
-        constexpr uint32_t kDefaultArrayLayers = 1;
-        wgpu::Texture texture = Create2DArrayTexture(device, kDefaultArrayLayers);
-
-        { texture.CreateView(); }
-        {
-            wgpu::TextureViewDescriptor descriptor;
-            descriptor.format = wgpu::TextureFormat::Undefined;
-            texture.CreateView(&descriptor);
-            descriptor.format = wgpu::TextureFormat::RGBA8Unorm;
-            texture.CreateView(&descriptor);
-            descriptor.format = wgpu::TextureFormat::R8Unorm;
-            ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
-        }
-        {
-            wgpu::TextureViewDescriptor descriptor;
-            descriptor.dimension = wgpu::TextureViewDimension::Undefined;
-            texture.CreateView(&descriptor);
-            descriptor.dimension = wgpu::TextureViewDimension::e2D;
-            texture.CreateView(&descriptor);
-            descriptor.dimension = wgpu::TextureViewDimension::e2DArray;
-            texture.CreateView(&descriptor);
-        }
-        {
-            wgpu::TextureViewDescriptor descriptor;
-            descriptor.arrayLayerCount = wgpu::kArrayLayerCountUndefined;
-            texture.CreateView(&descriptor);
-            descriptor.arrayLayerCount = 1;
-            texture.CreateView(&descriptor);
-            descriptor.arrayLayerCount = 2;
-            ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
-        }
-        {
-            wgpu::TextureViewDescriptor descriptor;
-            descriptor.mipLevelCount = kDefaultMipLevels;
-            texture.CreateView(&descriptor);
-            descriptor.arrayLayerCount = kDefaultArrayLayers;
-            texture.CreateView(&descriptor);
-        }
-    }
-
-    // Using the "none" ("default") values validates the same as explicitly
-    // specifying the values they're supposed to default to.
-    // Variant for a 3D texture.
-    TEST_F(TextureViewValidationTest, TextureViewDescriptorDefaults3D) {
-        wgpu::Texture texture = Create3DTexture(device);
-
-        { texture.CreateView(); }
-        {
-            wgpu::TextureViewDescriptor descriptor;
-            descriptor.format = wgpu::TextureFormat::Undefined;
-            texture.CreateView(&descriptor);
-            descriptor.format = wgpu::TextureFormat::RGBA8Unorm;
-            texture.CreateView(&descriptor);
-            descriptor.format = wgpu::TextureFormat::R8Unorm;
-            ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
-        }
-        {
-            wgpu::TextureViewDescriptor descriptor;
-            descriptor.dimension = wgpu::TextureViewDimension::Undefined;
-            texture.CreateView(&descriptor);
-            descriptor.dimension = wgpu::TextureViewDimension::e3D;
-            texture.CreateView(&descriptor);
-            descriptor.dimension = wgpu::TextureViewDimension::e2DArray;
-            ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
-            descriptor.dimension = wgpu::TextureViewDimension::e2D;
-            ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
-        }
-        {
-            wgpu::TextureViewDescriptor descriptor;
-            descriptor.arrayLayerCount = wgpu::kArrayLayerCountUndefined;
-            texture.CreateView(&descriptor);
-            descriptor.arrayLayerCount = 1;
-            texture.CreateView(&descriptor);
-            descriptor.arrayLayerCount = 2;
-            ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
-        }
-        {
-            wgpu::TextureViewDescriptor descriptor;
-            descriptor.mipLevelCount = kDefaultMipLevels;
-            texture.CreateView(&descriptor);
-            descriptor.arrayLayerCount = kDepth;
-            ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
-        }
-    }
-
-    // Regression test for crbug.com/1314049. Format default depends on the aspect.
-    // Test that computing the default does not crash if the aspect is invalid.
-    TEST_F(TextureViewValidationTest, TextureViewDescriptorDefaultsInvalidAspect) {
-        wgpu::Texture texture =
-            CreateDepthStencilTexture(device, wgpu::TextureFormat::Depth24PlusStencil8);
-
-        wgpu::TextureViewDescriptor viewDesc = {};
-        viewDesc.aspect = static_cast<wgpu::TextureAspect>(-1);
-
-        // Validation should catch the invalid aspect.
-        ASSERT_DEVICE_ERROR(texture.CreateView(&viewDesc),
-                            testing::HasSubstr("Invalid value for WGPUTextureAspect"));
-    }
-
-    // Test creating cube map texture view
-    TEST_F(TextureViewValidationTest, CreateCubeMapTextureView) {
-        constexpr uint32_t kDefaultArrayLayers = 16;
-
-        wgpu::Texture texture = Create2DArrayTexture(device, kDefaultArrayLayers);
-
-        wgpu::TextureViewDescriptor base2DArrayTextureViewDescriptor =
-            CreateDefaultViewDescriptor(wgpu::TextureViewDimension::e2DArray);
-
-        // It is an error to create a view with zero 'arrayLayerCount'.
-        {
-            wgpu::TextureViewDescriptor descriptor = base2DArrayTextureViewDescriptor;
-            descriptor.dimension = wgpu::TextureViewDimension::Cube;
-            descriptor.arrayLayerCount = 0;
-            ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
-        }
-
-        // It is an error to create a view with zero 'mipLevelCount'.
-        {
-            wgpu::TextureViewDescriptor descriptor = base2DArrayTextureViewDescriptor;
-            descriptor.dimension = wgpu::TextureViewDimension::Cube;
-            descriptor.mipLevelCount = 0;
-            ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
-        }
-
-        // It is OK to create a cube map texture view with arrayLayerCount == 6.
-        {
-            wgpu::TextureViewDescriptor descriptor = base2DArrayTextureViewDescriptor;
-            descriptor.dimension = wgpu::TextureViewDimension::Cube;
-            descriptor.arrayLayerCount = 6;
-            texture.CreateView(&descriptor);
-        }
-
-        // It is an error to create a cube map texture view with arrayLayerCount != 6.
-        {
-            wgpu::TextureViewDescriptor descriptor = base2DArrayTextureViewDescriptor;
-            descriptor.dimension = wgpu::TextureViewDimension::Cube;
-            descriptor.arrayLayerCount = 3;
-            ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
-        }
-
-        // It is OK to create a cube map array texture view with arrayLayerCount % 6 == 0.
-        {
-            wgpu::TextureViewDescriptor descriptor = base2DArrayTextureViewDescriptor;
-            descriptor.dimension = wgpu::TextureViewDimension::CubeArray;
-            descriptor.arrayLayerCount = 12;
-            texture.CreateView(&descriptor);
-        }
-
-        // It is an error to create a cube map array texture view with arrayLayerCount % 6 != 0.
-        {
-            wgpu::TextureViewDescriptor descriptor = base2DArrayTextureViewDescriptor;
-            descriptor.dimension = wgpu::TextureViewDimension::CubeArray;
-            descriptor.arrayLayerCount = 11;
-            ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
-        }
-
-        // It is an error to create a cube map texture view with width != height.
-        {
-            wgpu::Texture nonSquareTexture = Create2DArrayTexture(device, 18, 32, 16, 5);
-
-            wgpu::TextureViewDescriptor descriptor = base2DArrayTextureViewDescriptor;
-            descriptor.dimension = wgpu::TextureViewDimension::Cube;
-            descriptor.arrayLayerCount = 6;
-            ASSERT_DEVICE_ERROR(nonSquareTexture.CreateView(&descriptor));
-        }
-
-        // It is an error to create a cube map array texture view with width != height.
-        {
-            wgpu::Texture nonSquareTexture = Create2DArrayTexture(device, 18, 32, 16, 5);
-
-            wgpu::TextureViewDescriptor descriptor = base2DArrayTextureViewDescriptor;
-            descriptor.dimension = wgpu::TextureViewDimension::CubeArray;
-            descriptor.arrayLayerCount = 12;
-            ASSERT_DEVICE_ERROR(nonSquareTexture.CreateView(&descriptor));
-        }
-    }
-
-    // Test the format compatibility rules when creating a texture view.
-    TEST_F(TextureViewValidationTest, TextureViewFormatCompatibility) {
-        wgpu::TextureDescriptor textureDesc = {};
-        textureDesc.size.width = 4;
-        textureDesc.size.height = 4;
-        textureDesc.usage = wgpu::TextureUsage::TextureBinding;
-
-        wgpu::TextureViewDescriptor viewDesc = {};
-
-        // It is an error to create an sRGB texture view from an RGB texture, without viewFormats.
-        {
-            textureDesc.format = wgpu::TextureFormat::RGBA8Unorm;
-            viewDesc.format = wgpu::TextureFormat::RGBA8UnormSrgb;
-            wgpu::Texture texture = device.CreateTexture(&textureDesc);
-            ASSERT_DEVICE_ERROR(texture.CreateView(&viewDesc));
-        }
-
-        // It is an error to create an RGB texture view from an sRGB texture, without viewFormats.
-        {
-            textureDesc.format = wgpu::TextureFormat::BGRA8UnormSrgb;
-            viewDesc.format = wgpu::TextureFormat::BGRA8Unorm;
-            wgpu::Texture texture = device.CreateTexture(&textureDesc);
-            ASSERT_DEVICE_ERROR(texture.CreateView(&viewDesc));
-        }
-
-        // It is an error to create a texture view with a depth-stencil format of an RGBA texture.
-        {
-            textureDesc.format = wgpu::TextureFormat::RGBA8Unorm;
-            viewDesc.format = wgpu::TextureFormat::Depth24PlusStencil8;
-            wgpu::Texture texture = device.CreateTexture(&textureDesc);
-            ASSERT_DEVICE_ERROR(texture.CreateView(&viewDesc));
-        }
-
-        // It is an error to create a texture view with a depth format of a depth-stencil texture.
-        {
-            textureDesc.format = wgpu::TextureFormat::Depth24PlusStencil8;
-            viewDesc.format = wgpu::TextureFormat::Depth24Plus;
-            wgpu::Texture texture = device.CreateTexture(&textureDesc);
-            ASSERT_DEVICE_ERROR(texture.CreateView(&viewDesc));
-        }
-
-        // It is invalid to create a texture view with a combined depth-stencil format if only
-        // the depth aspect is selected.
-        {
-            textureDesc.format = wgpu::TextureFormat::Depth24PlusStencil8;
-            viewDesc.format = wgpu::TextureFormat::Depth24PlusStencil8;
-            viewDesc.aspect = wgpu::TextureAspect::DepthOnly;
-            wgpu::Texture texture = device.CreateTexture(&textureDesc);
-            ASSERT_DEVICE_ERROR(texture.CreateView(&viewDesc));
-        }
-
-        // It is invalid to create a texture view with a combined depth-stencil format if only
-        // the stencil aspect is selected.
-        {
-            textureDesc.format = wgpu::TextureFormat::Depth24PlusStencil8;
-            viewDesc.format = wgpu::TextureFormat::Depth24PlusStencil8;
-            viewDesc.aspect = wgpu::TextureAspect::StencilOnly;
-            wgpu::Texture texture = device.CreateTexture(&textureDesc);
-            ASSERT_DEVICE_ERROR(texture.CreateView(&viewDesc));
-        }
-
-        // Regression test for crbug.com/1312780.
-        // viewFormat is not supported (Null backend does not support any optional features).
-        {
-            textureDesc.format = wgpu::TextureFormat::Depth24PlusStencil8;
-            viewDesc.format = wgpu::TextureFormat::Depth24UnormStencil8;
-            wgpu::Texture texture = device.CreateTexture(&textureDesc);
-            ASSERT_DEVICE_ERROR(texture.CreateView(&viewDesc), testing::HasSubstr("Unsupported"));
-        }
-
-        // It is valid to create a texture view with a depth format of a depth-stencil texture
-        // if the depth only aspect is selected.
-        {
-            textureDesc.format = wgpu::TextureFormat::Depth24PlusStencil8;
-            viewDesc.format = wgpu::TextureFormat::Depth24Plus;
-            viewDesc.aspect = wgpu::TextureAspect::DepthOnly;
-            wgpu::Texture texture = device.CreateTexture(&textureDesc);
-            texture.CreateView(&viewDesc);
-
-            viewDesc = {};
-        }
-
-        // Prep for testing a single view format in viewFormats.
-        wgpu::TextureFormat viewFormat;
-        textureDesc.viewFormats = &viewFormat;
-        textureDesc.viewFormatCount = 1;
-
-        // An aspect format is not a valid view format of a depth-stencil texture.
-        {
-            textureDesc.format = wgpu::TextureFormat::Depth24PlusStencil8;
-            viewFormat = wgpu::TextureFormat::Depth24Plus;
-            ASSERT_DEVICE_ERROR(device.CreateTexture(&textureDesc));
-        }
-
-        // Test that a RGBA texture can be viewed as both RGBA and RGBASrgb, but not BGRA or
-        // BGRASrgb
-        {
-            textureDesc.format = wgpu::TextureFormat::RGBA8Unorm;
-            viewFormat = wgpu::TextureFormat::RGBA8UnormSrgb;
-            wgpu::Texture texture = device.CreateTexture(&textureDesc);
-
-            viewDesc.format = wgpu::TextureFormat::RGBA8UnormSrgb;
-            texture.CreateView(&viewDesc);
-
-            viewDesc.format = wgpu::TextureFormat::RGBA8Unorm;
-            texture.CreateView(&viewDesc);
-
-            viewDesc.format = wgpu::TextureFormat::BGRA8Unorm;
-            ASSERT_DEVICE_ERROR(texture.CreateView(&viewDesc));
-
-            viewDesc.format = wgpu::TextureFormat::BGRA8UnormSrgb;
-            ASSERT_DEVICE_ERROR(texture.CreateView(&viewDesc));
-        }
-
-        // Test that a BGRASrgb texture can be viewed as both BGRA and BGRASrgb, but not RGBA or
-        // RGBASrgb
-        {
-            textureDesc.format = wgpu::TextureFormat::BGRA8UnormSrgb;
-            viewFormat = wgpu::TextureFormat::BGRA8Unorm;
-            wgpu::Texture texture = device.CreateTexture(&textureDesc);
-
-            viewDesc.format = wgpu::TextureFormat::BGRA8Unorm;
-            texture.CreateView(&viewDesc);
-
-            viewDesc.format = wgpu::TextureFormat::BGRA8UnormSrgb;
-            texture.CreateView(&viewDesc);
-
-            viewDesc.format = wgpu::TextureFormat::RGBA8Unorm;
-            ASSERT_DEVICE_ERROR(texture.CreateView(&viewDesc));
-
-            viewDesc.format = wgpu::TextureFormat::RGBA8UnormSrgb;
-            ASSERT_DEVICE_ERROR(texture.CreateView(&viewDesc));
-        }
-
-        // Test an RGBA format may be viewed as RGBA (same)
-        {
-            textureDesc.format = wgpu::TextureFormat::RGBA8Unorm;
-            viewFormat = wgpu::TextureFormat::RGBA8Unorm;
-            wgpu::Texture texture = device.CreateTexture(&textureDesc);
-
-            viewDesc.format = wgpu::TextureFormat::RGBA8Unorm;
-            texture.CreateView(&viewDesc);
-
-            viewDesc.format = wgpu::TextureFormat::RGBA8UnormSrgb;
-            ASSERT_DEVICE_ERROR(texture.CreateView(&viewDesc));
-        }
-
-        // Test that duplicate, and multiple view formats are allowed.
-        {
-            std::array<wgpu::TextureFormat, 5> viewFormats = {
-                wgpu::TextureFormat::RGBA8UnormSrgb, wgpu::TextureFormat::RGBA8Unorm,
-                wgpu::TextureFormat::RGBA8Unorm,     wgpu::TextureFormat::RGBA8UnormSrgb,
-                wgpu::TextureFormat::RGBA8Unorm,
-            };
-            textureDesc.viewFormats = viewFormats.data();
-            textureDesc.viewFormatCount = viewFormats.size();
-
-            textureDesc.format = wgpu::TextureFormat::RGBA8Unorm;
-            wgpu::Texture texture = device.CreateTexture(&textureDesc);
-
-            viewDesc.format = wgpu::TextureFormat::RGBA8UnormSrgb;
-            texture.CreateView(&viewDesc);
-
-            viewDesc.format = wgpu::TextureFormat::RGBA8Unorm;
-            texture.CreateView(&viewDesc);
-
-            viewDesc.format = wgpu::TextureFormat::BGRA8Unorm;
-            ASSERT_DEVICE_ERROR(texture.CreateView(&viewDesc));
-
-            viewDesc.format = wgpu::TextureFormat::BGRA8UnormSrgb;
-            ASSERT_DEVICE_ERROR(texture.CreateView(&viewDesc));
-        }
-    }
-
-    // Test that it's valid to create a texture view from a destroyed texture
-    TEST_F(TextureViewValidationTest, DestroyCreateTextureView) {
-        wgpu::Texture texture = Create2DArrayTexture(device, 1);
-        wgpu::TextureViewDescriptor descriptor =
-            CreateDefaultViewDescriptor(wgpu::TextureViewDimension::e2D);
-        texture.Destroy();
         texture.CreateView(&descriptor);
     }
 
-    // Test that the selected TextureAspects must exist in the texture format
-    TEST_F(TextureViewValidationTest, AspectMustExist) {
-        wgpu::TextureDescriptor descriptor = {};
-        descriptor.size = {1, 1, 1};
-        descriptor.usage =
-            wgpu::TextureUsage::TextureBinding | wgpu::TextureUsage::RenderAttachment;
-
-        // Can select: All and DepthOnly from Depth32Float, but not StencilOnly
-        {
-            descriptor.format = wgpu::TextureFormat::Depth32Float;
-            wgpu::Texture texture = device.CreateTexture(&descriptor);
-
-            wgpu::TextureViewDescriptor viewDescriptor = {};
-            viewDescriptor.aspect = wgpu::TextureAspect::All;
-            texture.CreateView(&viewDescriptor);
-
-            viewDescriptor.aspect = wgpu::TextureAspect::DepthOnly;
-            texture.CreateView(&viewDescriptor);
-
-            viewDescriptor.aspect = wgpu::TextureAspect::StencilOnly;
-            ASSERT_DEVICE_ERROR(texture.CreateView(&viewDescriptor));
-        }
-
-        // Can select: All, DepthOnly, and StencilOnly from Depth24PlusStencil8
-        {
-            descriptor.format = wgpu::TextureFormat::Depth24PlusStencil8;
-            wgpu::Texture texture = device.CreateTexture(&descriptor);
-
-            wgpu::TextureViewDescriptor viewDescriptor = {};
-            viewDescriptor.aspect = wgpu::TextureAspect::All;
-            texture.CreateView(&viewDescriptor);
-
-            viewDescriptor.aspect = wgpu::TextureAspect::DepthOnly;
-            texture.CreateView(&viewDescriptor);
-
-            viewDescriptor.aspect = wgpu::TextureAspect::StencilOnly;
-            texture.CreateView(&viewDescriptor);
-        }
-
-        // Can select: All from RGBA8Unorm
-        {
-            descriptor.format = wgpu::TextureFormat::RGBA8Unorm;
-            wgpu::Texture texture = device.CreateTexture(&descriptor);
-
-            wgpu::TextureViewDescriptor viewDescriptor = {};
-            viewDescriptor.aspect = wgpu::TextureAspect::All;
-            texture.CreateView(&viewDescriptor);
-
-            viewDescriptor.aspect = wgpu::TextureAspect::DepthOnly;
-            ASSERT_DEVICE_ERROR(texture.CreateView(&viewDescriptor));
-
-            viewDescriptor.aspect = wgpu::TextureAspect::StencilOnly;
-            ASSERT_DEVICE_ERROR(texture.CreateView(&viewDescriptor));
-        }
+    // It is an error to view a layer past the end of the texture.
+    {
+        wgpu::TextureViewDescriptor descriptor = base2DTextureViewDescriptor;
+        descriptor.arrayLayerCount = 2;
+        ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
     }
 
-    class D24S8TextureViewValidationTests : public ValidationTest {
-      protected:
-        WGPUDevice CreateTestDevice() override {
-            wgpu::DeviceDescriptor descriptor;
-            wgpu::FeatureName requiredFeatures[1] = {wgpu::FeatureName::Depth24UnormStencil8};
-            descriptor.requiredFeatures = requiredFeatures;
-            descriptor.requiredFeaturesCount = 1;
-            return adapter.CreateDevice(&descriptor);
-        }
-    };
-
-    // Test that the selected TextureAspects must exist in the Depth24UnormStencil8 texture format
-    TEST_F(D24S8TextureViewValidationTests, AspectMustExist) {
-        wgpu::Texture texture =
-            CreateDepthStencilTexture(device, wgpu::TextureFormat::Depth24UnormStencil8);
-
-        // Can select: All, DepthOnly, and StencilOnly from Depth24UnormStencil8
-        {
-            wgpu::TextureViewDescriptor viewDescriptor = {};
-            viewDescriptor.aspect = wgpu::TextureAspect::All;
-            texture.CreateView(&viewDescriptor);
-
-            viewDescriptor.aspect = wgpu::TextureAspect::DepthOnly;
-            texture.CreateView(&viewDescriptor);
-
-            viewDescriptor.aspect = wgpu::TextureAspect::StencilOnly;
-            texture.CreateView(&viewDescriptor);
-        }
+    // It is OK to create a 1-layer 2D array texture view on a 2D texture.
+    {
+        wgpu::TextureViewDescriptor descriptor = base2DTextureViewDescriptor;
+        descriptor.dimension = wgpu::TextureViewDimension::e2DArray;
+        descriptor.arrayLayerCount = 1;
+        texture.CreateView(&descriptor);
     }
 
-    // Test the format compatibility rules when creating a texture view.
-    TEST_F(D24S8TextureViewValidationTests, TextureViewFormatCompatibility) {
-        wgpu::Texture texture =
-            CreateDepthStencilTexture(device, wgpu::TextureFormat::Depth24UnormStencil8);
+    // It is an error to create a 3D texture view on a 2D texture.
+    {
+        wgpu::TextureViewDescriptor descriptor = base2DTextureViewDescriptor;
+        descriptor.dimension = wgpu::TextureViewDimension::e3D;
+        descriptor.arrayLayerCount = 1;
+        ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
+    }
 
-        wgpu::TextureViewDescriptor base2DTextureViewDescriptor =
-            CreateDefaultViewDescriptor(wgpu::TextureViewDimension::e2D);
+    // baseMipLevel == k && mipLevelCount == WGPU_MIP_LEVEL_COUNT_UNDEFINED means to use levels
+    // k..end.
+    {
+        wgpu::TextureViewDescriptor descriptor = base2DTextureViewDescriptor;
+        descriptor.mipLevelCount = WGPU_MIP_LEVEL_COUNT_UNDEFINED;
 
-        // It is an error to create a texture view in color format on a depth-stencil texture.
-        {
-            wgpu::TextureViewDescriptor descriptor = base2DTextureViewDescriptor;
-            descriptor.format = wgpu::TextureFormat::RGBA8Unorm;
+        descriptor.baseMipLevel = 0;
+        texture.CreateView(&descriptor);
+        descriptor.baseMipLevel = 1;
+        texture.CreateView(&descriptor);
+        descriptor.baseMipLevel = kDefaultMipLevels - 1;
+        texture.CreateView(&descriptor);
+        descriptor.baseMipLevel = kDefaultMipLevels;
+        ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
+    }
+
+    // It is an error to make the mip level out of range.
+    {
+        wgpu::TextureViewDescriptor descriptor = base2DTextureViewDescriptor;
+        descriptor.baseMipLevel = 0;
+        descriptor.mipLevelCount = kDefaultMipLevels + 1;
+        ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
+        descriptor.baseMipLevel = 1;
+        descriptor.mipLevelCount = kDefaultMipLevels;
+        ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
+        descriptor.baseMipLevel = kDefaultMipLevels - 1;
+        descriptor.mipLevelCount = 2;
+        ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
+        descriptor.baseMipLevel = kDefaultMipLevels;
+        descriptor.mipLevelCount = 1;
+        ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
+    }
+}
+
+// Test creating texture view on a 2D array texture
+TEST_F(TextureViewValidationTest, CreateTextureViewOnTexture2DArray) {
+    constexpr uint32_t kDefaultArrayLayers = 6;
+
+    wgpu::Texture texture = Create2DArrayTexture(device, kDefaultArrayLayers);
+
+    wgpu::TextureViewDescriptor base2DArrayTextureViewDescriptor =
+        CreateDefaultViewDescriptor(wgpu::TextureViewDimension::e2DArray);
+
+    // It is an error to create a view with zero 'arrayLayerCount'.
+    {
+        wgpu::TextureViewDescriptor descriptor = base2DArrayTextureViewDescriptor;
+        descriptor.dimension = wgpu::TextureViewDimension::e2D;
+        descriptor.arrayLayerCount = 0;
+        ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
+    }
+
+    // It is an error to create a view with zero 'mipLevelCount'.
+    {
+        wgpu::TextureViewDescriptor descriptor = base2DArrayTextureViewDescriptor;
+        descriptor.dimension = wgpu::TextureViewDimension::e2D;
+        descriptor.mipLevelCount = 0;
+        ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
+    }
+
+    // It is OK to create a 2D texture view on a 2D array texture.
+    {
+        wgpu::TextureViewDescriptor descriptor = base2DArrayTextureViewDescriptor;
+        descriptor.dimension = wgpu::TextureViewDimension::e2D;
+        descriptor.arrayLayerCount = 1;
+        texture.CreateView(&descriptor);
+    }
+
+    // It is OK to create a 2D array texture view on a 2D array texture.
+    {
+        wgpu::TextureViewDescriptor descriptor = base2DArrayTextureViewDescriptor;
+        descriptor.arrayLayerCount = kDefaultArrayLayers;
+        texture.CreateView(&descriptor);
+    }
+
+    // It is an error to create a 3D texture view on a 2D array texture.
+    {
+        wgpu::TextureViewDescriptor descriptor = base2DArrayTextureViewDescriptor;
+        descriptor.dimension = wgpu::TextureViewDimension::e3D;
+        descriptor.arrayLayerCount = 1;
+        ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
+    }
+
+    // It is an error to create a 1D texture view on a 2D array texture.
+    {
+        wgpu::TextureViewDescriptor descriptor = base2DArrayTextureViewDescriptor;
+        descriptor.dimension = wgpu::TextureViewDimension::e1D;
+        descriptor.arrayLayerCount = 1;
+        ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
+    }
+
+    // baseArrayLayer == k && arrayLayerCount == wgpu::kArrayLayerCountUndefined means to use
+    // layers k..end.
+    {
+        wgpu::TextureViewDescriptor descriptor = base2DArrayTextureViewDescriptor;
+        descriptor.arrayLayerCount = wgpu::kArrayLayerCountUndefined;
+
+        descriptor.baseArrayLayer = 0;
+        texture.CreateView(&descriptor);
+        descriptor.baseArrayLayer = 1;
+        texture.CreateView(&descriptor);
+        descriptor.baseArrayLayer = kDefaultArrayLayers - 1;
+        texture.CreateView(&descriptor);
+        descriptor.baseArrayLayer = kDefaultArrayLayers;
+        ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
+    }
+
+    // It is an error for the array layer range of the view to exceed that of the texture.
+    {
+        wgpu::TextureViewDescriptor descriptor = base2DArrayTextureViewDescriptor;
+        descriptor.baseArrayLayer = 0;
+        descriptor.arrayLayerCount = kDefaultArrayLayers + 1;
+        ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
+        descriptor.baseArrayLayer = 1;
+        descriptor.arrayLayerCount = kDefaultArrayLayers;
+        ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
+        descriptor.baseArrayLayer = kDefaultArrayLayers - 1;
+        descriptor.arrayLayerCount = 2;
+        ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
+        descriptor.baseArrayLayer = kDefaultArrayLayers;
+        descriptor.arrayLayerCount = 1;
+        ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
+    }
+}
+
+// Test creating texture view on a 3D texture
+TEST_F(TextureViewValidationTest, CreateTextureViewOnTexture3D) {
+    wgpu::Texture texture = Create3DTexture(device);
+
+    wgpu::TextureViewDescriptor base3DTextureViewDescriptor =
+        CreateDefaultViewDescriptor(wgpu::TextureViewDimension::e3D);
+
+    // It is an error to create a view with zero 'arrayLayerCount'.
+    {
+        wgpu::TextureViewDescriptor descriptor = base3DTextureViewDescriptor;
+        descriptor.arrayLayerCount = 0;
+        ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
+    }
+
+    // It is an error to create a view with zero 'mipLevelCount'.
+    {
+        wgpu::TextureViewDescriptor descriptor = base3DTextureViewDescriptor;
+        descriptor.mipLevelCount = 0;
+        ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
+    }
+
+    // It is OK to create a 3D texture view on a 3D texture.
+    {
+        wgpu::TextureViewDescriptor descriptor = base3DTextureViewDescriptor;
+        texture.CreateView(&descriptor);
+    }
+
+    // It is an error to create a 1D/2D/2DArray/Cube/CubeArray texture view on a 3D texture.
+    {
+        wgpu::TextureViewDimension invalidDimensions[] = {
+            wgpu::TextureViewDimension::e1D,       wgpu::TextureViewDimension::e2D,
+            wgpu::TextureViewDimension::e2DArray,  wgpu::TextureViewDimension::Cube,
+            wgpu::TextureViewDimension::CubeArray,
+        };
+        for (wgpu::TextureViewDimension dimension : invalidDimensions) {
+            wgpu::TextureViewDescriptor descriptor = base3DTextureViewDescriptor;
+            descriptor.dimension = dimension;
+            if (dimension == wgpu::TextureViewDimension::Cube ||
+                dimension == wgpu::TextureViewDimension::CubeArray) {
+                descriptor.arrayLayerCount = 6;
+            }
             ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
         }
     }
 
-    class D32S8TextureViewValidationTests : public ValidationTest {
-      protected:
-        WGPUDevice CreateTestDevice() override {
-            wgpu::DeviceDescriptor descriptor;
-            wgpu::FeatureName requiredFeatures[1] = {wgpu::FeatureName::Depth32FloatStencil8};
-            descriptor.requiredFeatures = requiredFeatures;
-            descriptor.requiredFeaturesCount = 1;
-            return adapter.CreateDevice(&descriptor);
-        }
-    };
+    // baseMipLevel == k && mipLevelCount == WGPU_MIP_LEVEL_COUNT_UNDEFINED means to use levels
+    // k..end.
+    {
+        wgpu::TextureViewDescriptor descriptor = base3DTextureViewDescriptor;
+        descriptor.mipLevelCount = WGPU_MIP_LEVEL_COUNT_UNDEFINED;
 
-    // Test that the selected TextureAspects must exist in the Depth32FloatStencil8 texture format
-    TEST_F(D32S8TextureViewValidationTests, AspectMustExist) {
-        wgpu::Texture texture =
-            CreateDepthStencilTexture(device, wgpu::TextureFormat::Depth32FloatStencil8);
-
-        // Can select: All, DepthOnly, and StencilOnly from Depth32FloatStencil8
-        {
-            wgpu::TextureViewDescriptor viewDescriptor = {};
-            viewDescriptor.aspect = wgpu::TextureAspect::All;
-            texture.CreateView(&viewDescriptor);
-
-            viewDescriptor.aspect = wgpu::TextureAspect::DepthOnly;
-            texture.CreateView(&viewDescriptor);
-
-            viewDescriptor.aspect = wgpu::TextureAspect::StencilOnly;
-            texture.CreateView(&viewDescriptor);
-        }
+        descriptor.baseMipLevel = 0;
+        texture.CreateView(&descriptor);
+        descriptor.baseMipLevel = 1;
+        texture.CreateView(&descriptor);
+        descriptor.baseMipLevel = kDefaultMipLevels - 1;
+        texture.CreateView(&descriptor);
+        descriptor.baseMipLevel = kDefaultMipLevels;
+        ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
     }
 
-    // Test the format compatibility rules when creating a texture view.
-    TEST_F(D32S8TextureViewValidationTests, TextureViewFormatCompatibility) {
-        wgpu::Texture texture =
-            CreateDepthStencilTexture(device, wgpu::TextureFormat::Depth32FloatStencil8);
+    // It is an error to make the mip level out of range.
+    {
+        wgpu::TextureViewDescriptor descriptor = base3DTextureViewDescriptor;
+        descriptor.baseMipLevel = 0;
+        descriptor.mipLevelCount = kDefaultMipLevels + 1;
+        ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
+        descriptor.baseMipLevel = 1;
+        descriptor.mipLevelCount = kDefaultMipLevels;
+        ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
+        descriptor.baseMipLevel = kDefaultMipLevels - 1;
+        descriptor.mipLevelCount = 2;
+        ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
+        descriptor.baseMipLevel = kDefaultMipLevels;
+        descriptor.mipLevelCount = 1;
+        ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
+    }
 
-        wgpu::TextureViewDescriptor base2DTextureViewDescriptor =
-            CreateDefaultViewDescriptor(wgpu::TextureViewDimension::e2D);
+    // baseArrayLayer == k && arrayLayerCount == wgpu::kArrayLayerCountUndefined means to use
+    // layers k..end. But baseArrayLayer must be 0, and arrayLayerCount must be 1 at most for 3D
+    // texture view.
+    {
+        wgpu::TextureViewDescriptor descriptor = base3DTextureViewDescriptor;
+        descriptor.arrayLayerCount = wgpu::kArrayLayerCountUndefined;
+        descriptor.baseArrayLayer = 0;
+        texture.CreateView(&descriptor);
+        descriptor.baseArrayLayer = 1;
+        ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
 
-        // It is an error to create a texture view in color format on a depth-stencil texture.
-        {
-            wgpu::TextureViewDescriptor descriptor = base2DTextureViewDescriptor;
-            descriptor.format = wgpu::TextureFormat::RGBA8Unorm;
+        descriptor.baseArrayLayer = 0;
+        descriptor.arrayLayerCount = 1;
+        texture.CreateView(&descriptor);
+        descriptor.arrayLayerCount = 2;
+        ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
+        descriptor.arrayLayerCount = kDepth;
+        ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
+    }
+}
+
+// Test creating texture view on a 1D texture
+TEST_F(TextureViewValidationTest, CreateTextureViewOnTexture1D) {
+    wgpu::Texture texture = Create1DTexture(device);
+
+    wgpu::TextureViewDescriptor base1DTextureViewDescriptor =
+        CreateDefaultViewDescriptor(wgpu::TextureViewDimension::e1D);
+
+    // It is an error to create a view with zero 'arrayLayerCount'.
+    {
+        wgpu::TextureViewDescriptor descriptor = base1DTextureViewDescriptor;
+        descriptor.arrayLayerCount = 0;
+        ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
+    }
+
+    // It is an error to create a view with zero 'mipLevelCount'.
+    {
+        wgpu::TextureViewDescriptor descriptor = base1DTextureViewDescriptor;
+        descriptor.mipLevelCount = 0;
+        ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
+    }
+
+    // It is OK to create a 1D texture view on a 1D texture.
+    {
+        wgpu::TextureViewDescriptor descriptor = base1DTextureViewDescriptor;
+        texture.CreateView(&descriptor);
+    }
+
+    // It is an error to create a 2D/2DArray/Cube/CubeArray/3D texture view on a 1D texture.
+    {
+        wgpu::TextureViewDimension invalidDimensions[] = {
+            wgpu::TextureViewDimension::e2D,  wgpu::TextureViewDimension::e2DArray,
+            wgpu::TextureViewDimension::Cube, wgpu::TextureViewDimension::CubeArray,
+            wgpu::TextureViewDimension::e3D,
+        };
+        for (wgpu::TextureViewDimension dimension : invalidDimensions) {
+            wgpu::TextureViewDescriptor descriptor = base1DTextureViewDescriptor;
+            descriptor.dimension = dimension;
             ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
         }
     }
 
+    // No tests for setting mip levels / array layer ranges because 1D textures can only have
+    // a single mip and layer.
+}
+
+// Test creating texture view on a multisampled 2D texture
+TEST_F(TextureViewValidationTest, CreateTextureViewOnMultisampledTexture2D) {
+    wgpu::Texture texture = Create2DArrayTexture(device, /* arrayLayerCount */ 1, kWidth, kHeight,
+                                                 /* mipLevelCount */ 1, /* sampleCount */ 4);
+
+    // It is OK to create a 2D texture view on a multisampled 2D texture.
+    {
+        wgpu::TextureViewDescriptor descriptor = {};
+        texture.CreateView(&descriptor);
+    }
+
+    // It is an error to create a 1-layer 2D array texture view on a multisampled 2D texture.
+    {
+        wgpu::TextureViewDescriptor descriptor = {};
+        descriptor.dimension = wgpu::TextureViewDimension::e2DArray;
+        descriptor.arrayLayerCount = 1;
+        ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
+    }
+
+    // It is an error to create a 1D texture view on a multisampled 2D texture.
+    {
+        wgpu::TextureViewDescriptor descriptor = {};
+        descriptor.dimension = wgpu::TextureViewDimension::e1D;
+        ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
+    }
+
+    // It is an error to create a 3D texture view on a multisampled 2D texture.
+    {
+        wgpu::TextureViewDescriptor descriptor = {};
+        descriptor.dimension = wgpu::TextureViewDimension::e3D;
+        ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
+    }
+}
+
+// Using the "none" ("default") values validates the same as explicitly
+// specifying the values they're supposed to default to.
+// Variant for a 2D texture with more than 1 array layer.
+TEST_F(TextureViewValidationTest, TextureViewDescriptorDefaults2DArray) {
+    constexpr uint32_t kDefaultArrayLayers = 8;
+    wgpu::Texture texture = Create2DArrayTexture(device, kDefaultArrayLayers);
+
+    { texture.CreateView(); }
+    {
+        wgpu::TextureViewDescriptor descriptor;
+        descriptor.format = wgpu::TextureFormat::Undefined;
+        texture.CreateView(&descriptor);
+        descriptor.format = wgpu::TextureFormat::RGBA8Unorm;
+        texture.CreateView(&descriptor);
+        descriptor.format = wgpu::TextureFormat::R8Unorm;
+        ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
+    }
+    {
+        wgpu::TextureViewDescriptor descriptor;
+        descriptor.dimension = wgpu::TextureViewDimension::Undefined;
+        texture.CreateView(&descriptor);
+        descriptor.dimension = wgpu::TextureViewDimension::e2DArray;
+        texture.CreateView(&descriptor);
+        // Setting view dimension to 2D, its arrayLayer will default to 1. And view creation
+        // will succeed.
+        descriptor.dimension = wgpu::TextureViewDimension::e2D;
+        texture.CreateView(&descriptor);
+        // Setting view dimension to Cube, its arrayLayer will default to 6.
+        descriptor.dimension = wgpu::TextureViewDimension::Cube;
+        texture.CreateView(&descriptor);
+        descriptor.baseArrayLayer = 2;
+        texture.CreateView(&descriptor);
+        descriptor.baseArrayLayer = 3;
+        ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
+        // Setting view dimension to CubeArray, its arrayLayer will default to
+        // size.depthOrArrayLayers (kDefaultArrayLayers) - baseArrayLayer.
+        descriptor.dimension = wgpu::TextureViewDimension::CubeArray;
+        descriptor.baseArrayLayer = 0;
+        ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
+        descriptor.baseArrayLayer = 2;
+        texture.CreateView(&descriptor);
+        descriptor.baseArrayLayer = 3;
+        ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
+    }
+    {
+        wgpu::TextureViewDescriptor descriptor;
+
+        // Setting array layers to non-0 means the dimensionality will
+        // default to 2D so by itself it causes an error.
+        descriptor.arrayLayerCount = kDefaultArrayLayers;
+        ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
+        descriptor.dimension = wgpu::TextureViewDimension::e2DArray;
+        texture.CreateView(&descriptor);
+
+        descriptor.mipLevelCount = kDefaultMipLevels;
+        texture.CreateView(&descriptor);
+    }
+}
+
+// Using the "none" ("default") values validates the same as explicitly
+// specifying the values they're supposed to default to.
+// Variant for a 2D texture with only 1 array layer.
+TEST_F(TextureViewValidationTest, TextureViewDescriptorDefaults2DNonArray) {
+    constexpr uint32_t kDefaultArrayLayers = 1;
+    wgpu::Texture texture = Create2DArrayTexture(device, kDefaultArrayLayers);
+
+    { texture.CreateView(); }
+    {
+        wgpu::TextureViewDescriptor descriptor;
+        descriptor.format = wgpu::TextureFormat::Undefined;
+        texture.CreateView(&descriptor);
+        descriptor.format = wgpu::TextureFormat::RGBA8Unorm;
+        texture.CreateView(&descriptor);
+        descriptor.format = wgpu::TextureFormat::R8Unorm;
+        ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
+    }
+    {
+        wgpu::TextureViewDescriptor descriptor;
+        descriptor.dimension = wgpu::TextureViewDimension::Undefined;
+        texture.CreateView(&descriptor);
+        descriptor.dimension = wgpu::TextureViewDimension::e2D;
+        texture.CreateView(&descriptor);
+        descriptor.dimension = wgpu::TextureViewDimension::e2DArray;
+        texture.CreateView(&descriptor);
+    }
+    {
+        wgpu::TextureViewDescriptor descriptor;
+        descriptor.arrayLayerCount = wgpu::kArrayLayerCountUndefined;
+        texture.CreateView(&descriptor);
+        descriptor.arrayLayerCount = 1;
+        texture.CreateView(&descriptor);
+        descriptor.arrayLayerCount = 2;
+        ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
+    }
+    {
+        wgpu::TextureViewDescriptor descriptor;
+        descriptor.mipLevelCount = kDefaultMipLevels;
+        texture.CreateView(&descriptor);
+        descriptor.arrayLayerCount = kDefaultArrayLayers;
+        texture.CreateView(&descriptor);
+    }
+}
+
+// Using the "none" ("default") values validates the same as explicitly
+// specifying the values they're supposed to default to.
+// Variant for a 3D texture.
+TEST_F(TextureViewValidationTest, TextureViewDescriptorDefaults3D) {
+    wgpu::Texture texture = Create3DTexture(device);
+
+    { texture.CreateView(); }
+    {
+        wgpu::TextureViewDescriptor descriptor;
+        descriptor.format = wgpu::TextureFormat::Undefined;
+        texture.CreateView(&descriptor);
+        descriptor.format = wgpu::TextureFormat::RGBA8Unorm;
+        texture.CreateView(&descriptor);
+        descriptor.format = wgpu::TextureFormat::R8Unorm;
+        ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
+    }
+    {
+        wgpu::TextureViewDescriptor descriptor;
+        descriptor.dimension = wgpu::TextureViewDimension::Undefined;
+        texture.CreateView(&descriptor);
+        descriptor.dimension = wgpu::TextureViewDimension::e3D;
+        texture.CreateView(&descriptor);
+        descriptor.dimension = wgpu::TextureViewDimension::e2DArray;
+        ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
+        descriptor.dimension = wgpu::TextureViewDimension::e2D;
+        ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
+    }
+    {
+        wgpu::TextureViewDescriptor descriptor;
+        descriptor.arrayLayerCount = wgpu::kArrayLayerCountUndefined;
+        texture.CreateView(&descriptor);
+        descriptor.arrayLayerCount = 1;
+        texture.CreateView(&descriptor);
+        descriptor.arrayLayerCount = 2;
+        ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
+    }
+    {
+        wgpu::TextureViewDescriptor descriptor;
+        descriptor.mipLevelCount = kDefaultMipLevels;
+        texture.CreateView(&descriptor);
+        descriptor.arrayLayerCount = kDepth;
+        ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
+    }
+}
+
+// Regression test for crbug.com/1314049. Format default depends on the aspect.
+// Test that computing the default does not crash if the aspect is invalid.
+TEST_F(TextureViewValidationTest, TextureViewDescriptorDefaultsInvalidAspect) {
+    wgpu::Texture texture =
+        CreateDepthStencilTexture(device, wgpu::TextureFormat::Depth24PlusStencil8);
+
+    wgpu::TextureViewDescriptor viewDesc = {};
+    viewDesc.aspect = static_cast<wgpu::TextureAspect>(-1);
+
+    // Validation should catch the invalid aspect.
+    ASSERT_DEVICE_ERROR(texture.CreateView(&viewDesc),
+                        testing::HasSubstr("Invalid value for WGPUTextureAspect"));
+}
+
+// Test creating cube map texture view
+TEST_F(TextureViewValidationTest, CreateCubeMapTextureView) {
+    constexpr uint32_t kDefaultArrayLayers = 16;
+
+    wgpu::Texture texture = Create2DArrayTexture(device, kDefaultArrayLayers);
+
+    wgpu::TextureViewDescriptor base2DArrayTextureViewDescriptor =
+        CreateDefaultViewDescriptor(wgpu::TextureViewDimension::e2DArray);
+
+    // It is an error to create a view with zero 'arrayLayerCount'.
+    {
+        wgpu::TextureViewDescriptor descriptor = base2DArrayTextureViewDescriptor;
+        descriptor.dimension = wgpu::TextureViewDimension::Cube;
+        descriptor.arrayLayerCount = 0;
+        ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
+    }
+
+    // It is an error to create a view with zero 'mipLevelCount'.
+    {
+        wgpu::TextureViewDescriptor descriptor = base2DArrayTextureViewDescriptor;
+        descriptor.dimension = wgpu::TextureViewDimension::Cube;
+        descriptor.mipLevelCount = 0;
+        ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
+    }
+
+    // It is OK to create a cube map texture view with arrayLayerCount == 6.
+    {
+        wgpu::TextureViewDescriptor descriptor = base2DArrayTextureViewDescriptor;
+        descriptor.dimension = wgpu::TextureViewDimension::Cube;
+        descriptor.arrayLayerCount = 6;
+        texture.CreateView(&descriptor);
+    }
+
+    // It is an error to create a cube map texture view with arrayLayerCount != 6.
+    {
+        wgpu::TextureViewDescriptor descriptor = base2DArrayTextureViewDescriptor;
+        descriptor.dimension = wgpu::TextureViewDimension::Cube;
+        descriptor.arrayLayerCount = 3;
+        ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
+    }
+
+    // It is OK to create a cube map array texture view with arrayLayerCount % 6 == 0.
+    {
+        wgpu::TextureViewDescriptor descriptor = base2DArrayTextureViewDescriptor;
+        descriptor.dimension = wgpu::TextureViewDimension::CubeArray;
+        descriptor.arrayLayerCount = 12;
+        texture.CreateView(&descriptor);
+    }
+
+    // It is an error to create a cube map array texture view with arrayLayerCount % 6 != 0.
+    {
+        wgpu::TextureViewDescriptor descriptor = base2DArrayTextureViewDescriptor;
+        descriptor.dimension = wgpu::TextureViewDimension::CubeArray;
+        descriptor.arrayLayerCount = 11;
+        ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
+    }
+
+    // It is an error to create a cube map texture view with width != height.
+    {
+        wgpu::Texture nonSquareTexture = Create2DArrayTexture(device, 18, 32, 16, 5);
+
+        wgpu::TextureViewDescriptor descriptor = base2DArrayTextureViewDescriptor;
+        descriptor.dimension = wgpu::TextureViewDimension::Cube;
+        descriptor.arrayLayerCount = 6;
+        ASSERT_DEVICE_ERROR(nonSquareTexture.CreateView(&descriptor));
+    }
+
+    // It is an error to create a cube map array texture view with width != height.
+    {
+        wgpu::Texture nonSquareTexture = Create2DArrayTexture(device, 18, 32, 16, 5);
+
+        wgpu::TextureViewDescriptor descriptor = base2DArrayTextureViewDescriptor;
+        descriptor.dimension = wgpu::TextureViewDimension::CubeArray;
+        descriptor.arrayLayerCount = 12;
+        ASSERT_DEVICE_ERROR(nonSquareTexture.CreateView(&descriptor));
+    }
+}
+
+// Test the format compatibility rules when creating a texture view.
+TEST_F(TextureViewValidationTest, TextureViewFormatCompatibility) {
+    wgpu::TextureDescriptor textureDesc = {};
+    textureDesc.size.width = 4;
+    textureDesc.size.height = 4;
+    textureDesc.usage = wgpu::TextureUsage::TextureBinding;
+
+    wgpu::TextureViewDescriptor viewDesc = {};
+
+    // It is an error to create an sRGB texture view from an RGB texture, without viewFormats.
+    {
+        textureDesc.format = wgpu::TextureFormat::RGBA8Unorm;
+        viewDesc.format = wgpu::TextureFormat::RGBA8UnormSrgb;
+        wgpu::Texture texture = device.CreateTexture(&textureDesc);
+        ASSERT_DEVICE_ERROR(texture.CreateView(&viewDesc));
+    }
+
+    // It is an error to create an RGB texture view from an sRGB texture, without viewFormats.
+    {
+        textureDesc.format = wgpu::TextureFormat::BGRA8UnormSrgb;
+        viewDesc.format = wgpu::TextureFormat::BGRA8Unorm;
+        wgpu::Texture texture = device.CreateTexture(&textureDesc);
+        ASSERT_DEVICE_ERROR(texture.CreateView(&viewDesc));
+    }
+
+    // It is an error to create a texture view with a depth-stencil format of an RGBA texture.
+    {
+        textureDesc.format = wgpu::TextureFormat::RGBA8Unorm;
+        viewDesc.format = wgpu::TextureFormat::Depth24PlusStencil8;
+        wgpu::Texture texture = device.CreateTexture(&textureDesc);
+        ASSERT_DEVICE_ERROR(texture.CreateView(&viewDesc));
+    }
+
+    // It is an error to create a texture view with a depth format of a depth-stencil texture.
+    {
+        textureDesc.format = wgpu::TextureFormat::Depth24PlusStencil8;
+        viewDesc.format = wgpu::TextureFormat::Depth24Plus;
+        wgpu::Texture texture = device.CreateTexture(&textureDesc);
+        ASSERT_DEVICE_ERROR(texture.CreateView(&viewDesc));
+    }
+
+    // It is invalid to create a texture view with a combined depth-stencil format if only
+    // the depth aspect is selected.
+    {
+        textureDesc.format = wgpu::TextureFormat::Depth24PlusStencil8;
+        viewDesc.format = wgpu::TextureFormat::Depth24PlusStencil8;
+        viewDesc.aspect = wgpu::TextureAspect::DepthOnly;
+        wgpu::Texture texture = device.CreateTexture(&textureDesc);
+        ASSERT_DEVICE_ERROR(texture.CreateView(&viewDesc));
+    }
+
+    // It is invalid to create a texture view with a combined depth-stencil format if only
+    // the stencil aspect is selected.
+    {
+        textureDesc.format = wgpu::TextureFormat::Depth24PlusStencil8;
+        viewDesc.format = wgpu::TextureFormat::Depth24PlusStencil8;
+        viewDesc.aspect = wgpu::TextureAspect::StencilOnly;
+        wgpu::Texture texture = device.CreateTexture(&textureDesc);
+        ASSERT_DEVICE_ERROR(texture.CreateView(&viewDesc));
+    }
+
+    // Regression test for crbug.com/1312780.
+    // viewFormat is not supported (Null backend does not support any optional features).
+    {
+        textureDesc.format = wgpu::TextureFormat::Depth24PlusStencil8;
+        viewDesc.format = wgpu::TextureFormat::Depth24UnormStencil8;
+        wgpu::Texture texture = device.CreateTexture(&textureDesc);
+        ASSERT_DEVICE_ERROR(texture.CreateView(&viewDesc), testing::HasSubstr("Unsupported"));
+    }
+
+    // It is valid to create a texture view with a depth format of a depth-stencil texture
+    // if the depth only aspect is selected.
+    {
+        textureDesc.format = wgpu::TextureFormat::Depth24PlusStencil8;
+        viewDesc.format = wgpu::TextureFormat::Depth24Plus;
+        viewDesc.aspect = wgpu::TextureAspect::DepthOnly;
+        wgpu::Texture texture = device.CreateTexture(&textureDesc);
+        texture.CreateView(&viewDesc);
+
+        viewDesc = {};
+    }
+
+    // Prep for testing a single view format in viewFormats.
+    wgpu::TextureFormat viewFormat;
+    textureDesc.viewFormats = &viewFormat;
+    textureDesc.viewFormatCount = 1;
+
+    // An aspect format is not a valid view format of a depth-stencil texture.
+    {
+        textureDesc.format = wgpu::TextureFormat::Depth24PlusStencil8;
+        viewFormat = wgpu::TextureFormat::Depth24Plus;
+        ASSERT_DEVICE_ERROR(device.CreateTexture(&textureDesc));
+    }
+
+    // Test that a RGBA texture can be viewed as both RGBA and RGBASrgb, but not BGRA or
+    // BGRASrgb
+    {
+        textureDesc.format = wgpu::TextureFormat::RGBA8Unorm;
+        viewFormat = wgpu::TextureFormat::RGBA8UnormSrgb;
+        wgpu::Texture texture = device.CreateTexture(&textureDesc);
+
+        viewDesc.format = wgpu::TextureFormat::RGBA8UnormSrgb;
+        texture.CreateView(&viewDesc);
+
+        viewDesc.format = wgpu::TextureFormat::RGBA8Unorm;
+        texture.CreateView(&viewDesc);
+
+        viewDesc.format = wgpu::TextureFormat::BGRA8Unorm;
+        ASSERT_DEVICE_ERROR(texture.CreateView(&viewDesc));
+
+        viewDesc.format = wgpu::TextureFormat::BGRA8UnormSrgb;
+        ASSERT_DEVICE_ERROR(texture.CreateView(&viewDesc));
+    }
+
+    // Test that a BGRASrgb texture can be viewed as both BGRA and BGRASrgb, but not RGBA or
+    // RGBASrgb
+    {
+        textureDesc.format = wgpu::TextureFormat::BGRA8UnormSrgb;
+        viewFormat = wgpu::TextureFormat::BGRA8Unorm;
+        wgpu::Texture texture = device.CreateTexture(&textureDesc);
+
+        viewDesc.format = wgpu::TextureFormat::BGRA8Unorm;
+        texture.CreateView(&viewDesc);
+
+        viewDesc.format = wgpu::TextureFormat::BGRA8UnormSrgb;
+        texture.CreateView(&viewDesc);
+
+        viewDesc.format = wgpu::TextureFormat::RGBA8Unorm;
+        ASSERT_DEVICE_ERROR(texture.CreateView(&viewDesc));
+
+        viewDesc.format = wgpu::TextureFormat::RGBA8UnormSrgb;
+        ASSERT_DEVICE_ERROR(texture.CreateView(&viewDesc));
+    }
+
+    // Test an RGBA format may be viewed as RGBA (same)
+    {
+        textureDesc.format = wgpu::TextureFormat::RGBA8Unorm;
+        viewFormat = wgpu::TextureFormat::RGBA8Unorm;
+        wgpu::Texture texture = device.CreateTexture(&textureDesc);
+
+        viewDesc.format = wgpu::TextureFormat::RGBA8Unorm;
+        texture.CreateView(&viewDesc);
+
+        viewDesc.format = wgpu::TextureFormat::RGBA8UnormSrgb;
+        ASSERT_DEVICE_ERROR(texture.CreateView(&viewDesc));
+    }
+
+    // Test that duplicate, and multiple view formats are allowed.
+    {
+        std::array<wgpu::TextureFormat, 5> viewFormats = {
+            wgpu::TextureFormat::RGBA8UnormSrgb, wgpu::TextureFormat::RGBA8Unorm,
+            wgpu::TextureFormat::RGBA8Unorm,     wgpu::TextureFormat::RGBA8UnormSrgb,
+            wgpu::TextureFormat::RGBA8Unorm,
+        };
+        textureDesc.viewFormats = viewFormats.data();
+        textureDesc.viewFormatCount = viewFormats.size();
+
+        textureDesc.format = wgpu::TextureFormat::RGBA8Unorm;
+        wgpu::Texture texture = device.CreateTexture(&textureDesc);
+
+        viewDesc.format = wgpu::TextureFormat::RGBA8UnormSrgb;
+        texture.CreateView(&viewDesc);
+
+        viewDesc.format = wgpu::TextureFormat::RGBA8Unorm;
+        texture.CreateView(&viewDesc);
+
+        viewDesc.format = wgpu::TextureFormat::BGRA8Unorm;
+        ASSERT_DEVICE_ERROR(texture.CreateView(&viewDesc));
+
+        viewDesc.format = wgpu::TextureFormat::BGRA8UnormSrgb;
+        ASSERT_DEVICE_ERROR(texture.CreateView(&viewDesc));
+    }
+}
+
+// Test that it's valid to create a texture view from a destroyed texture
+TEST_F(TextureViewValidationTest, DestroyCreateTextureView) {
+    wgpu::Texture texture = Create2DArrayTexture(device, 1);
+    wgpu::TextureViewDescriptor descriptor =
+        CreateDefaultViewDescriptor(wgpu::TextureViewDimension::e2D);
+    texture.Destroy();
+    texture.CreateView(&descriptor);
+}
+
+// Test that the selected TextureAspects must exist in the texture format
+TEST_F(TextureViewValidationTest, AspectMustExist) {
+    wgpu::TextureDescriptor descriptor = {};
+    descriptor.size = {1, 1, 1};
+    descriptor.usage = wgpu::TextureUsage::TextureBinding | wgpu::TextureUsage::RenderAttachment;
+
+    // Can select: All and DepthOnly from Depth32Float, but not StencilOnly
+    {
+        descriptor.format = wgpu::TextureFormat::Depth32Float;
+        wgpu::Texture texture = device.CreateTexture(&descriptor);
+
+        wgpu::TextureViewDescriptor viewDescriptor = {};
+        viewDescriptor.aspect = wgpu::TextureAspect::All;
+        texture.CreateView(&viewDescriptor);
+
+        viewDescriptor.aspect = wgpu::TextureAspect::DepthOnly;
+        texture.CreateView(&viewDescriptor);
+
+        viewDescriptor.aspect = wgpu::TextureAspect::StencilOnly;
+        ASSERT_DEVICE_ERROR(texture.CreateView(&viewDescriptor));
+    }
+
+    // Can select: All, DepthOnly, and StencilOnly from Depth24PlusStencil8
+    {
+        descriptor.format = wgpu::TextureFormat::Depth24PlusStencil8;
+        wgpu::Texture texture = device.CreateTexture(&descriptor);
+
+        wgpu::TextureViewDescriptor viewDescriptor = {};
+        viewDescriptor.aspect = wgpu::TextureAspect::All;
+        texture.CreateView(&viewDescriptor);
+
+        viewDescriptor.aspect = wgpu::TextureAspect::DepthOnly;
+        texture.CreateView(&viewDescriptor);
+
+        viewDescriptor.aspect = wgpu::TextureAspect::StencilOnly;
+        texture.CreateView(&viewDescriptor);
+    }
+
+    // Can select: All from RGBA8Unorm
+    {
+        descriptor.format = wgpu::TextureFormat::RGBA8Unorm;
+        wgpu::Texture texture = device.CreateTexture(&descriptor);
+
+        wgpu::TextureViewDescriptor viewDescriptor = {};
+        viewDescriptor.aspect = wgpu::TextureAspect::All;
+        texture.CreateView(&viewDescriptor);
+
+        viewDescriptor.aspect = wgpu::TextureAspect::DepthOnly;
+        ASSERT_DEVICE_ERROR(texture.CreateView(&viewDescriptor));
+
+        viewDescriptor.aspect = wgpu::TextureAspect::StencilOnly;
+        ASSERT_DEVICE_ERROR(texture.CreateView(&viewDescriptor));
+    }
+}
+
+class D24S8TextureViewValidationTests : public ValidationTest {
+  protected:
+    WGPUDevice CreateTestDevice() override {
+        wgpu::DeviceDescriptor descriptor;
+        wgpu::FeatureName requiredFeatures[1] = {wgpu::FeatureName::Depth24UnormStencil8};
+        descriptor.requiredFeatures = requiredFeatures;
+        descriptor.requiredFeaturesCount = 1;
+        return adapter.CreateDevice(&descriptor);
+    }
+};
+
+// Test that the selected TextureAspects must exist in the Depth24UnormStencil8 texture format
+TEST_F(D24S8TextureViewValidationTests, AspectMustExist) {
+    wgpu::Texture texture =
+        CreateDepthStencilTexture(device, wgpu::TextureFormat::Depth24UnormStencil8);
+
+    // Can select: All, DepthOnly, and StencilOnly from Depth24UnormStencil8
+    {
+        wgpu::TextureViewDescriptor viewDescriptor = {};
+        viewDescriptor.aspect = wgpu::TextureAspect::All;
+        texture.CreateView(&viewDescriptor);
+
+        viewDescriptor.aspect = wgpu::TextureAspect::DepthOnly;
+        texture.CreateView(&viewDescriptor);
+
+        viewDescriptor.aspect = wgpu::TextureAspect::StencilOnly;
+        texture.CreateView(&viewDescriptor);
+    }
+}
+
+// Test the format compatibility rules when creating a texture view.
+TEST_F(D24S8TextureViewValidationTests, TextureViewFormatCompatibility) {
+    wgpu::Texture texture =
+        CreateDepthStencilTexture(device, wgpu::TextureFormat::Depth24UnormStencil8);
+
+    wgpu::TextureViewDescriptor base2DTextureViewDescriptor =
+        CreateDefaultViewDescriptor(wgpu::TextureViewDimension::e2D);
+
+    // It is an error to create a texture view in color format on a depth-stencil texture.
+    {
+        wgpu::TextureViewDescriptor descriptor = base2DTextureViewDescriptor;
+        descriptor.format = wgpu::TextureFormat::RGBA8Unorm;
+        ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
+    }
+}
+
+class D32S8TextureViewValidationTests : public ValidationTest {
+  protected:
+    WGPUDevice CreateTestDevice() override {
+        wgpu::DeviceDescriptor descriptor;
+        wgpu::FeatureName requiredFeatures[1] = {wgpu::FeatureName::Depth32FloatStencil8};
+        descriptor.requiredFeatures = requiredFeatures;
+        descriptor.requiredFeaturesCount = 1;
+        return adapter.CreateDevice(&descriptor);
+    }
+};
+
+// Test that the selected TextureAspects must exist in the Depth32FloatStencil8 texture format
+TEST_F(D32S8TextureViewValidationTests, AspectMustExist) {
+    wgpu::Texture texture =
+        CreateDepthStencilTexture(device, wgpu::TextureFormat::Depth32FloatStencil8);
+
+    // Can select: All, DepthOnly, and StencilOnly from Depth32FloatStencil8
+    {
+        wgpu::TextureViewDescriptor viewDescriptor = {};
+        viewDescriptor.aspect = wgpu::TextureAspect::All;
+        texture.CreateView(&viewDescriptor);
+
+        viewDescriptor.aspect = wgpu::TextureAspect::DepthOnly;
+        texture.CreateView(&viewDescriptor);
+
+        viewDescriptor.aspect = wgpu::TextureAspect::StencilOnly;
+        texture.CreateView(&viewDescriptor);
+    }
+}
+
+// Test the format compatibility rules when creating a texture view.
+TEST_F(D32S8TextureViewValidationTests, TextureViewFormatCompatibility) {
+    wgpu::Texture texture =
+        CreateDepthStencilTexture(device, wgpu::TextureFormat::Depth32FloatStencil8);
+
+    wgpu::TextureViewDescriptor base2DTextureViewDescriptor =
+        CreateDefaultViewDescriptor(wgpu::TextureViewDimension::e2D);
+
+    // It is an error to create a texture view in color format on a depth-stencil texture.
+    {
+        wgpu::TextureViewDescriptor descriptor = base2DTextureViewDescriptor;
+        descriptor.format = wgpu::TextureFormat::RGBA8Unorm;
+        ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
+    }
+}
+
 }  // anonymous namespace
diff --git a/src/dawn/tests/unittests/validation/ToggleValidationTests.cpp b/src/dawn/tests/unittests/validation/ToggleValidationTests.cpp
index 34acac4..1c0d9be 100644
--- a/src/dawn/tests/unittests/validation/ToggleValidationTests.cpp
+++ b/src/dawn/tests/unittests/validation/ToggleValidationTests.cpp
@@ -18,74 +18,33 @@
 
 namespace {
 
-    class ToggleValidationTest : public ValidationTest {};
+class ToggleValidationTest : public ValidationTest {};
 
-    // Tests querying the detail of a toggle from dawn::native::InstanceBase works correctly.
-    TEST_F(ToggleValidationTest, QueryToggleInfo) {
-        // Query with a valid toggle name
-        {
-            const char* kValidToggleName = "emulate_store_and_msaa_resolve";
-            const dawn::native::ToggleInfo* toggleInfo = instance->GetToggleInfo(kValidToggleName);
-            ASSERT_NE(nullptr, toggleInfo);
-            ASSERT_NE(nullptr, toggleInfo->name);
-            ASSERT_NE(nullptr, toggleInfo->description);
-            ASSERT_NE(nullptr, toggleInfo->url);
-        }
-
-        // Query with an invalid toggle name
-        {
-            const char* kInvalidToggleName = "!@#$%^&*";
-            const dawn::native::ToggleInfo* toggleInfo =
-                instance->GetToggleInfo(kInvalidToggleName);
-            ASSERT_EQ(nullptr, toggleInfo);
-        }
+// Tests querying the detail of a toggle from dawn::native::InstanceBase works correctly.
+TEST_F(ToggleValidationTest, QueryToggleInfo) {
+    // Query with a valid toggle name
+    {
+        const char* kValidToggleName = "emulate_store_and_msaa_resolve";
+        const dawn::native::ToggleInfo* toggleInfo = instance->GetToggleInfo(kValidToggleName);
+        ASSERT_NE(nullptr, toggleInfo);
+        ASSERT_NE(nullptr, toggleInfo->name);
+        ASSERT_NE(nullptr, toggleInfo->description);
+        ASSERT_NE(nullptr, toggleInfo->url);
     }
 
-    // Tests overriding toggles when creating a device works correctly.
-    TEST_F(ToggleValidationTest, OverrideToggleUsage) {
-        // Create device with a valid name of a toggle
-        {
-            const char* kValidToggleName = "emulate_store_and_msaa_resolve";
-            wgpu::DeviceDescriptor descriptor;
-            wgpu::DawnTogglesDeviceDescriptor togglesDesc;
-            descriptor.nextInChain = &togglesDesc;
-            togglesDesc.forceEnabledToggles = &kValidToggleName;
-            togglesDesc.forceEnabledTogglesCount = 1;
-
-            WGPUDevice deviceWithToggle = adapter.CreateDevice(&descriptor);
-            std::vector<const char*> toggleNames = dawn::native::GetTogglesUsed(deviceWithToggle);
-            bool validToggleExists = false;
-            for (const char* toggle : toggleNames) {
-                if (strcmp(toggle, kValidToggleName) == 0) {
-                    validToggleExists = true;
-                }
-            }
-            ASSERT_EQ(validToggleExists, true);
-        }
-
-        // Create device with an invalid toggle name
-        {
-            const char* kInvalidToggleName = "!@#$%^&*";
-            wgpu::DeviceDescriptor descriptor;
-            wgpu::DawnTogglesDeviceDescriptor togglesDesc;
-            descriptor.nextInChain = &togglesDesc;
-            togglesDesc.forceEnabledToggles = &kInvalidToggleName;
-            togglesDesc.forceEnabledTogglesCount = 1;
-
-            WGPUDevice deviceWithToggle = adapter.CreateDevice(&descriptor);
-            std::vector<const char*> toggleNames = dawn::native::GetTogglesUsed(deviceWithToggle);
-            bool InvalidToggleExists = false;
-            for (const char* toggle : toggleNames) {
-                if (strcmp(toggle, kInvalidToggleName) == 0) {
-                    InvalidToggleExists = true;
-                }
-            }
-            ASSERT_EQ(InvalidToggleExists, false);
-        }
+    // Query with an invalid toggle name
+    {
+        const char* kInvalidToggleName = "!@#$%^&*";
+        const dawn::native::ToggleInfo* toggleInfo = instance->GetToggleInfo(kInvalidToggleName);
+        ASSERT_EQ(nullptr, toggleInfo);
     }
+}
 
-    TEST_F(ToggleValidationTest, TurnOffVsyncWithToggle) {
-        const char* kValidToggleName = "turn_off_vsync";
+// Tests overriding toggles when creating a device works correctly.
+TEST_F(ToggleValidationTest, OverrideToggleUsage) {
+    // Create device with a valid name of a toggle
+    {
+        const char* kValidToggleName = "emulate_store_and_msaa_resolve";
         wgpu::DeviceDescriptor descriptor;
         wgpu::DawnTogglesDeviceDescriptor togglesDesc;
         descriptor.nextInChain = &togglesDesc;
@@ -102,4 +61,44 @@
         }
         ASSERT_EQ(validToggleExists, true);
     }
+
+    // Create device with an invalid toggle name
+    {
+        const char* kInvalidToggleName = "!@#$%^&*";
+        wgpu::DeviceDescriptor descriptor;
+        wgpu::DawnTogglesDeviceDescriptor togglesDesc;
+        descriptor.nextInChain = &togglesDesc;
+        togglesDesc.forceEnabledToggles = &kInvalidToggleName;
+        togglesDesc.forceEnabledTogglesCount = 1;
+
+        WGPUDevice deviceWithToggle = adapter.CreateDevice(&descriptor);
+        std::vector<const char*> toggleNames = dawn::native::GetTogglesUsed(deviceWithToggle);
+        bool InvalidToggleExists = false;
+        for (const char* toggle : toggleNames) {
+            if (strcmp(toggle, kInvalidToggleName) == 0) {
+                InvalidToggleExists = true;
+            }
+        }
+        ASSERT_EQ(InvalidToggleExists, false);
+    }
+}
+
+TEST_F(ToggleValidationTest, TurnOffVsyncWithToggle) {
+    const char* kValidToggleName = "turn_off_vsync";
+    wgpu::DeviceDescriptor descriptor;
+    wgpu::DawnTogglesDeviceDescriptor togglesDesc;
+    descriptor.nextInChain = &togglesDesc;
+    togglesDesc.forceEnabledToggles = &kValidToggleName;
+    togglesDesc.forceEnabledTogglesCount = 1;
+
+    WGPUDevice deviceWithToggle = adapter.CreateDevice(&descriptor);
+    std::vector<const char*> toggleNames = dawn::native::GetTogglesUsed(deviceWithToggle);
+    bool validToggleExists = false;
+    for (const char* toggle : toggleNames) {
+        if (strcmp(toggle, kValidToggleName) == 0) {
+            validToggleExists = true;
+        }
+    }
+    ASSERT_EQ(validToggleExists, true);
+}
 }  // anonymous namespace
diff --git a/src/dawn/tests/unittests/validation/UnsafeAPIValidationTests.cpp b/src/dawn/tests/unittests/validation/UnsafeAPIValidationTests.cpp
index 0fee9c6..95b5645 100644
--- a/src/dawn/tests/unittests/validation/UnsafeAPIValidationTests.cpp
+++ b/src/dawn/tests/unittests/validation/UnsafeAPIValidationTests.cpp
@@ -21,7 +21,7 @@
 #include "dawn/utils/WGPUHelpers.h"
 
 namespace {
-    using testing::HasSubstr;
+using testing::HasSubstr;
 }  // anonymous namespace
 
 class UnsafeAPIValidationTest : public ValidationTest {
diff --git a/src/dawn/tests/unittests/validation/ValidationTest.cpp b/src/dawn/tests/unittests/validation/ValidationTest.cpp
index d027057..b7b23b2 100644
--- a/src/dawn/tests/unittests/validation/ValidationTest.cpp
+++ b/src/dawn/tests/unittests/validation/ValidationTest.cpp
@@ -26,9 +26,10 @@
 
 namespace {
 
-    bool gUseWire = false;
-    std::string gWireTraceDir = "";
-    std::unique_ptr<ToggleParser> gToggleParser = nullptr;
+bool gUseWire = false;
+// NOLINTNEXTLINE(runtime/string)
+std::string gWireTraceDir = "";
+std::unique_ptr<ToggleParser> gToggleParser = nullptr;
 
 }  // namespace
 
@@ -79,8 +80,7 @@
 }
 
 ValidationTest::ValidationTest()
-    : mWireHelper(utils::CreateWireHelper(gUseWire, gWireTraceDir.c_str())) {
-}
+    : mWireHelper(utils::CreateWireHelper(gUseWire, gWireTraceDir.c_str())) {}
 
 void ValidationTest::SetUp() {
     instance = std::make_unique<dawn::native::Instance>();
diff --git a/src/dawn/tests/unittests/validation/ValidationTest.h b/src/dawn/tests/unittests/validation/ValidationTest.h
index e202772..f373b28 100644
--- a/src/dawn/tests/unittests/validation/ValidationTest.h
+++ b/src/dawn/tests/unittests/validation/ValidationTest.h
@@ -18,11 +18,11 @@
 #include <memory>
 #include <string>
 
-#include "gmock/gmock.h"
-#include "gtest/gtest.h"
 #include "dawn/common/Log.h"
 #include "dawn/native/DawnNative.h"
 #include "dawn/webgpu_cpp.h"
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
 
 // Argument helpers to allow macro overriding.
 #define UNIMPLEMENTED_MACRO(...) UNREACHABLE()
@@ -88,7 +88,7 @@
 #define EXPECT_DEPRECATION_WARNING(statement) EXPECT_DEPRECATION_WARNINGS(statement, 1)
 
 namespace utils {
-    class WireHelper;
+class WireHelper;
 }  // namespace utils
 
 void InitDawnValidationTestEnvironment(int argc, char** argv);
diff --git a/src/dawn/tests/unittests/validation/VideoViewsValidationTests.cpp b/src/dawn/tests/unittests/validation/VideoViewsValidationTests.cpp
index 088ed60..6075b94 100644
--- a/src/dawn/tests/unittests/validation/VideoViewsValidationTests.cpp
+++ b/src/dawn/tests/unittests/validation/VideoViewsValidationTests.cpp
@@ -19,326 +19,322 @@
 
 namespace {
 
-    class VideoViewsValidation : public ValidationTest {
-      protected:
-        WGPUDevice CreateTestDevice() override {
-            wgpu::DeviceDescriptor descriptor;
-            wgpu::FeatureName requiredFeatures[1] = {wgpu::FeatureName::DawnMultiPlanarFormats};
-            descriptor.requiredFeatures = requiredFeatures;
-            descriptor.requiredFeaturesCount = 1;
-            return adapter.CreateDevice(&descriptor);
-        }
-
-        wgpu::Texture CreateVideoTextureForTest(wgpu::TextureFormat format,
-                                                wgpu::TextureUsage usage) {
-            wgpu::TextureDescriptor descriptor;
-            descriptor.dimension = wgpu::TextureDimension::e2D;
-            descriptor.size.width = 1;
-            descriptor.size.height = 1;
-            descriptor.format = format;
-            descriptor.usage = usage;
-            return device.CreateTexture(&descriptor);
-        }
-    };
-
-    // Test texture views compatibility rules.
-    TEST_F(VideoViewsValidation, CreateViewFails) {
-        wgpu::Texture videoTexture = CreateVideoTextureForTest(
-            wgpu::TextureFormat::R8BG8Biplanar420Unorm, wgpu::TextureUsage::TextureBinding);
-
-        wgpu::TextureViewDescriptor viewDesc = {};
-
-        // Correct plane index but incompatible view format.
-        viewDesc.format = wgpu::TextureFormat::R8Uint;
-        viewDesc.aspect = wgpu::TextureAspect::Plane0Only;
-        ASSERT_DEVICE_ERROR(videoTexture.CreateView(&viewDesc));
-
-        // Compatible view format but wrong plane index.
-        viewDesc.format = wgpu::TextureFormat::R8Unorm;
-        viewDesc.aspect = wgpu::TextureAspect::Plane1Only;
-        ASSERT_DEVICE_ERROR(videoTexture.CreateView(&viewDesc));
-
-        // Compatible view format but wrong aspect.
-        viewDesc.format = wgpu::TextureFormat::R8Unorm;
-        viewDesc.aspect = wgpu::TextureAspect::All;
-        ASSERT_DEVICE_ERROR(videoTexture.CreateView(&viewDesc));
-
-        // Create a single plane texture.
-        wgpu::TextureDescriptor desc;
-        desc.format = wgpu::TextureFormat::RGBA8Unorm;
-        desc.dimension = wgpu::TextureDimension::e2D;
-        desc.usage = wgpu::TextureUsage::TextureBinding;
-        desc.size = {1, 1, 1};
-
-        wgpu::Texture texture = device.CreateTexture(&desc);
-
-        // Plane aspect specified with non-planar texture.
-        viewDesc.aspect = wgpu::TextureAspect::Plane0Only;
-        ASSERT_DEVICE_ERROR(texture.CreateView(&viewDesc));
-
-        viewDesc.aspect = wgpu::TextureAspect::Plane1Only;
-        ASSERT_DEVICE_ERROR(texture.CreateView(&viewDesc));
-
-        // Planar views with non-planar texture.
-        viewDesc.aspect = wgpu::TextureAspect::Plane0Only;
-        viewDesc.format = wgpu::TextureFormat::R8Unorm;
-        ASSERT_DEVICE_ERROR(texture.CreateView(&viewDesc));
-
-        viewDesc.aspect = wgpu::TextureAspect::Plane1Only;
-        viewDesc.format = wgpu::TextureFormat::RG8Unorm;
-        ASSERT_DEVICE_ERROR(texture.CreateView(&viewDesc));
+class VideoViewsValidation : public ValidationTest {
+  protected:
+    WGPUDevice CreateTestDevice() override {
+        wgpu::DeviceDescriptor descriptor;
+        wgpu::FeatureName requiredFeatures[1] = {wgpu::FeatureName::DawnMultiPlanarFormats};
+        descriptor.requiredFeatures = requiredFeatures;
+        descriptor.requiredFeaturesCount = 1;
+        return adapter.CreateDevice(&descriptor);
     }
 
-    // Test texture views compatibility rules.
-    TEST_F(VideoViewsValidation, CreateViewSucceeds) {
-        wgpu::Texture yuvTexture = CreateVideoTextureForTest(
-            wgpu::TextureFormat::R8BG8Biplanar420Unorm, wgpu::TextureUsage::TextureBinding);
-
-        // Per plane view formats unspecified.
-        wgpu::TextureViewDescriptor planeViewDesc = {};
-        planeViewDesc.aspect = wgpu::TextureAspect::Plane0Only;
-        wgpu::TextureView plane0View = yuvTexture.CreateView(&planeViewDesc);
-
-        planeViewDesc.aspect = wgpu::TextureAspect::Plane1Only;
-        wgpu::TextureView plane1View = yuvTexture.CreateView(&planeViewDesc);
-
-        ASSERT_NE(plane0View.Get(), nullptr);
-        ASSERT_NE(plane1View.Get(), nullptr);
-
-        // Per plane view formats specified.
-        planeViewDesc.aspect = wgpu::TextureAspect::Plane0Only;
-        planeViewDesc.format = wgpu::TextureFormat::R8Unorm;
-        plane0View = yuvTexture.CreateView(&planeViewDesc);
-
-        planeViewDesc.aspect = wgpu::TextureAspect::Plane1Only;
-        planeViewDesc.format = wgpu::TextureFormat::RG8Unorm;
-        plane1View = yuvTexture.CreateView(&planeViewDesc);
-
-        ASSERT_NE(plane0View.Get(), nullptr);
-        ASSERT_NE(plane1View.Get(), nullptr);
+    wgpu::Texture CreateVideoTextureForTest(wgpu::TextureFormat format, wgpu::TextureUsage usage) {
+        wgpu::TextureDescriptor descriptor;
+        descriptor.dimension = wgpu::TextureDimension::e2D;
+        descriptor.size.width = 1;
+        descriptor.size.height = 1;
+        descriptor.format = format;
+        descriptor.usage = usage;
+        return device.CreateTexture(&descriptor);
     }
+};
 
-    // Test copying from one multi-planar format into another fails.
-    TEST_F(VideoViewsValidation, T2TCopyAllAspectsFails) {
-        wgpu::Texture srcTexture = CreateVideoTextureForTest(
-            wgpu::TextureFormat::R8BG8Biplanar420Unorm, wgpu::TextureUsage::TextureBinding);
+// Test texture views compatibility rules.
+TEST_F(VideoViewsValidation, CreateViewFails) {
+    wgpu::Texture videoTexture = CreateVideoTextureForTest(
+        wgpu::TextureFormat::R8BG8Biplanar420Unorm, wgpu::TextureUsage::TextureBinding);
 
-        wgpu::Texture dstTexture = CreateVideoTextureForTest(
-            wgpu::TextureFormat::R8BG8Biplanar420Unorm, wgpu::TextureUsage::TextureBinding);
+    wgpu::TextureViewDescriptor viewDesc = {};
 
-        wgpu::ImageCopyTexture copySrc = utils::CreateImageCopyTexture(srcTexture, 0, {0, 0, 0});
+    // Correct plane index but incompatible view format.
+    viewDesc.format = wgpu::TextureFormat::R8Uint;
+    viewDesc.aspect = wgpu::TextureAspect::Plane0Only;
+    ASSERT_DEVICE_ERROR(videoTexture.CreateView(&viewDesc));
 
-        wgpu::ImageCopyTexture copyDst = utils::CreateImageCopyTexture(dstTexture, 0, {0, 0, 0});
+    // Compatible view format but wrong plane index.
+    viewDesc.format = wgpu::TextureFormat::R8Unorm;
+    viewDesc.aspect = wgpu::TextureAspect::Plane1Only;
+    ASSERT_DEVICE_ERROR(videoTexture.CreateView(&viewDesc));
 
-        wgpu::Extent3D copySize = {1, 1, 1};
+    // Compatible view format but wrong aspect.
+    viewDesc.format = wgpu::TextureFormat::R8Unorm;
+    viewDesc.aspect = wgpu::TextureAspect::All;
+    ASSERT_DEVICE_ERROR(videoTexture.CreateView(&viewDesc));
 
+    // Create a single plane texture.
+    wgpu::TextureDescriptor desc;
+    desc.format = wgpu::TextureFormat::RGBA8Unorm;
+    desc.dimension = wgpu::TextureDimension::e2D;
+    desc.usage = wgpu::TextureUsage::TextureBinding;
+    desc.size = {1, 1, 1};
+
+    wgpu::Texture texture = device.CreateTexture(&desc);
+
+    // Plane aspect specified with non-planar texture.
+    viewDesc.aspect = wgpu::TextureAspect::Plane0Only;
+    ASSERT_DEVICE_ERROR(texture.CreateView(&viewDesc));
+
+    viewDesc.aspect = wgpu::TextureAspect::Plane1Only;
+    ASSERT_DEVICE_ERROR(texture.CreateView(&viewDesc));
+
+    // Planar views with non-planar texture.
+    viewDesc.aspect = wgpu::TextureAspect::Plane0Only;
+    viewDesc.format = wgpu::TextureFormat::R8Unorm;
+    ASSERT_DEVICE_ERROR(texture.CreateView(&viewDesc));
+
+    viewDesc.aspect = wgpu::TextureAspect::Plane1Only;
+    viewDesc.format = wgpu::TextureFormat::RG8Unorm;
+    ASSERT_DEVICE_ERROR(texture.CreateView(&viewDesc));
+}
+
+// Test texture views compatibility rules.
+TEST_F(VideoViewsValidation, CreateViewSucceeds) {
+    wgpu::Texture yuvTexture = CreateVideoTextureForTest(wgpu::TextureFormat::R8BG8Biplanar420Unorm,
+                                                         wgpu::TextureUsage::TextureBinding);
+
+    // Per plane view formats unspecified.
+    wgpu::TextureViewDescriptor planeViewDesc = {};
+    planeViewDesc.aspect = wgpu::TextureAspect::Plane0Only;
+    wgpu::TextureView plane0View = yuvTexture.CreateView(&planeViewDesc);
+
+    planeViewDesc.aspect = wgpu::TextureAspect::Plane1Only;
+    wgpu::TextureView plane1View = yuvTexture.CreateView(&planeViewDesc);
+
+    ASSERT_NE(plane0View.Get(), nullptr);
+    ASSERT_NE(plane1View.Get(), nullptr);
+
+    // Per plane view formats specified.
+    planeViewDesc.aspect = wgpu::TextureAspect::Plane0Only;
+    planeViewDesc.format = wgpu::TextureFormat::R8Unorm;
+    plane0View = yuvTexture.CreateView(&planeViewDesc);
+
+    planeViewDesc.aspect = wgpu::TextureAspect::Plane1Only;
+    planeViewDesc.format = wgpu::TextureFormat::RG8Unorm;
+    plane1View = yuvTexture.CreateView(&planeViewDesc);
+
+    ASSERT_NE(plane0View.Get(), nullptr);
+    ASSERT_NE(plane1View.Get(), nullptr);
+}
+
+// Test copying from one multi-planar format into another fails.
+TEST_F(VideoViewsValidation, T2TCopyAllAspectsFails) {
+    wgpu::Texture srcTexture = CreateVideoTextureForTest(wgpu::TextureFormat::R8BG8Biplanar420Unorm,
+                                                         wgpu::TextureUsage::TextureBinding);
+
+    wgpu::Texture dstTexture = CreateVideoTextureForTest(wgpu::TextureFormat::R8BG8Biplanar420Unorm,
+                                                         wgpu::TextureUsage::TextureBinding);
+
+    wgpu::ImageCopyTexture copySrc = utils::CreateImageCopyTexture(srcTexture, 0, {0, 0, 0});
+
+    wgpu::ImageCopyTexture copyDst = utils::CreateImageCopyTexture(dstTexture, 0, {0, 0, 0});
+
+    wgpu::Extent3D copySize = {1, 1, 1};
+
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+    encoder.CopyTextureToTexture(&copySrc, &copyDst, &copySize);
+    ASSERT_DEVICE_ERROR(encoder.Finish());
+}
+
+// Test copying from one multi-planar format into another per plane fails.
+TEST_F(VideoViewsValidation, T2TCopyPlaneAspectFails) {
+    wgpu::Texture srcTexture = CreateVideoTextureForTest(wgpu::TextureFormat::R8BG8Biplanar420Unorm,
+                                                         wgpu::TextureUsage::TextureBinding);
+
+    wgpu::Texture dstTexture = CreateVideoTextureForTest(wgpu::TextureFormat::R8BG8Biplanar420Unorm,
+                                                         wgpu::TextureUsage::TextureBinding);
+
+    wgpu::ImageCopyTexture copySrc =
+        utils::CreateImageCopyTexture(srcTexture, 0, {0, 0, 0}, wgpu::TextureAspect::Plane0Only);
+
+    wgpu::ImageCopyTexture copyDst =
+        utils::CreateImageCopyTexture(dstTexture, 0, {0, 0, 0}, wgpu::TextureAspect::Plane1Only);
+
+    wgpu::Extent3D copySize = {1, 1, 1};
+
+    {
         wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
         encoder.CopyTextureToTexture(&copySrc, &copyDst, &copySize);
         ASSERT_DEVICE_ERROR(encoder.Finish());
     }
 
-    // Test copying from one multi-planar format into another per plane fails.
-    TEST_F(VideoViewsValidation, T2TCopyPlaneAspectFails) {
-        wgpu::Texture srcTexture = CreateVideoTextureForTest(
-            wgpu::TextureFormat::R8BG8Biplanar420Unorm, wgpu::TextureUsage::TextureBinding);
+    copySrc =
+        utils::CreateImageCopyTexture(srcTexture, 0, {0, 0, 0}, wgpu::TextureAspect::Plane1Only);
 
-        wgpu::Texture dstTexture = CreateVideoTextureForTest(
-            wgpu::TextureFormat::R8BG8Biplanar420Unorm, wgpu::TextureUsage::TextureBinding);
-
-        wgpu::ImageCopyTexture copySrc = utils::CreateImageCopyTexture(
-            srcTexture, 0, {0, 0, 0}, wgpu::TextureAspect::Plane0Only);
-
-        wgpu::ImageCopyTexture copyDst = utils::CreateImageCopyTexture(
-            dstTexture, 0, {0, 0, 0}, wgpu::TextureAspect::Plane1Only);
-
-        wgpu::Extent3D copySize = {1, 1, 1};
-
-        {
-            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-            encoder.CopyTextureToTexture(&copySrc, &copyDst, &copySize);
-            ASSERT_DEVICE_ERROR(encoder.Finish());
-        }
-
-        copySrc = utils::CreateImageCopyTexture(srcTexture, 0, {0, 0, 0},
-                                                wgpu::TextureAspect::Plane1Only);
-
-        {
-            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-            encoder.CopyTextureToTexture(&copySrc, &copyDst, &copySize);
-            ASSERT_DEVICE_ERROR(encoder.Finish());
-        }
+    {
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        encoder.CopyTextureToTexture(&copySrc, &copyDst, &copySize);
+        ASSERT_DEVICE_ERROR(encoder.Finish());
     }
+}
 
-    // Test copying from a multi-planar format to a buffer fails.
-    TEST_F(VideoViewsValidation, T2BCopyAllAspectsFails) {
-        wgpu::Texture srcTexture = CreateVideoTextureForTest(
-            wgpu::TextureFormat::R8BG8Biplanar420Unorm, wgpu::TextureUsage::TextureBinding);
+// Test copying from a multi-planar format to a buffer fails.
+TEST_F(VideoViewsValidation, T2BCopyAllAspectsFails) {
+    wgpu::Texture srcTexture = CreateVideoTextureForTest(wgpu::TextureFormat::R8BG8Biplanar420Unorm,
+                                                         wgpu::TextureUsage::TextureBinding);
 
-        wgpu::BufferDescriptor bufferDescriptor;
-        bufferDescriptor.size = 1;
-        bufferDescriptor.usage = wgpu::BufferUsage::CopyDst;
-        wgpu::Buffer dstBuffer = device.CreateBuffer(&bufferDescriptor);
+    wgpu::BufferDescriptor bufferDescriptor;
+    bufferDescriptor.size = 1;
+    bufferDescriptor.usage = wgpu::BufferUsage::CopyDst;
+    wgpu::Buffer dstBuffer = device.CreateBuffer(&bufferDescriptor);
 
-        wgpu::ImageCopyTexture copySrc = utils::CreateImageCopyTexture(srcTexture, 0, {0, 0, 0});
+    wgpu::ImageCopyTexture copySrc = utils::CreateImageCopyTexture(srcTexture, 0, {0, 0, 0});
 
-        wgpu::ImageCopyBuffer copyDst = utils::CreateImageCopyBuffer(dstBuffer, 0, 4);
+    wgpu::ImageCopyBuffer copyDst = utils::CreateImageCopyBuffer(dstBuffer, 0, 4);
 
-        wgpu::Extent3D copySize = {1, 1, 1};
+    wgpu::Extent3D copySize = {1, 1, 1};
 
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+    encoder.CopyTextureToBuffer(&copySrc, &copyDst, &copySize);
+    ASSERT_DEVICE_ERROR(encoder.Finish());
+}
+
+// Test copying from multi-planar format per plane to a buffer fails.
+TEST_F(VideoViewsValidation, T2BCopyPlaneAspectsFails) {
+    wgpu::Texture srcTexture = CreateVideoTextureForTest(wgpu::TextureFormat::R8BG8Biplanar420Unorm,
+                                                         wgpu::TextureUsage::TextureBinding);
+
+    wgpu::BufferDescriptor bufferDescriptor;
+    bufferDescriptor.size = 1;
+    bufferDescriptor.usage = wgpu::BufferUsage::CopyDst;
+    wgpu::Buffer dstBuffer = device.CreateBuffer(&bufferDescriptor);
+
+    wgpu::ImageCopyTexture copySrc =
+        utils::CreateImageCopyTexture(srcTexture, 0, {0, 0, 0}, wgpu::TextureAspect::Plane0Only);
+
+    wgpu::ImageCopyBuffer copyDst = utils::CreateImageCopyBuffer(dstBuffer, 0, 4);
+
+    wgpu::Extent3D copySize = {1, 1, 1};
+
+    {
         wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
         encoder.CopyTextureToBuffer(&copySrc, &copyDst, &copySize);
         ASSERT_DEVICE_ERROR(encoder.Finish());
     }
 
-    // Test copying from multi-planar format per plane to a buffer fails.
-    TEST_F(VideoViewsValidation, T2BCopyPlaneAspectsFails) {
-        wgpu::Texture srcTexture = CreateVideoTextureForTest(
-            wgpu::TextureFormat::R8BG8Biplanar420Unorm, wgpu::TextureUsage::TextureBinding);
+    copySrc =
+        utils::CreateImageCopyTexture(srcTexture, 0, {0, 0, 0}, wgpu::TextureAspect::Plane1Only);
 
-        wgpu::BufferDescriptor bufferDescriptor;
-        bufferDescriptor.size = 1;
-        bufferDescriptor.usage = wgpu::BufferUsage::CopyDst;
-        wgpu::Buffer dstBuffer = device.CreateBuffer(&bufferDescriptor);
-
-        wgpu::ImageCopyTexture copySrc = utils::CreateImageCopyTexture(
-            srcTexture, 0, {0, 0, 0}, wgpu::TextureAspect::Plane0Only);
-
-        wgpu::ImageCopyBuffer copyDst = utils::CreateImageCopyBuffer(dstBuffer, 0, 4);
-
-        wgpu::Extent3D copySize = {1, 1, 1};
-
-        {
-            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-            encoder.CopyTextureToBuffer(&copySrc, &copyDst, &copySize);
-            ASSERT_DEVICE_ERROR(encoder.Finish());
-        }
-
-        copySrc = utils::CreateImageCopyTexture(srcTexture, 0, {0, 0, 0},
-                                                wgpu::TextureAspect::Plane1Only);
-
-        {
-            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-            encoder.CopyTextureToBuffer(&copySrc, &copyDst, &copySize);
-            ASSERT_DEVICE_ERROR(encoder.Finish());
-        }
+    {
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        encoder.CopyTextureToBuffer(&copySrc, &copyDst, &copySize);
+        ASSERT_DEVICE_ERROR(encoder.Finish());
     }
+}
 
-    // Test copying from a buffer to a multi-planar format fails.
-    TEST_F(VideoViewsValidation, B2TCopyAllAspectsFails) {
-        std::vector<uint8_t> placeholderData(4, 0);
+// Test copying from a buffer to a multi-planar format fails.
+TEST_F(VideoViewsValidation, B2TCopyAllAspectsFails) {
+    std::vector<uint8_t> placeholderData(4, 0);
 
-        wgpu::Buffer srcBuffer = utils::CreateBufferFromData(
-            device, placeholderData.data(), placeholderData.size(), wgpu::BufferUsage::CopySrc);
+    wgpu::Buffer srcBuffer = utils::CreateBufferFromData(
+        device, placeholderData.data(), placeholderData.size(), wgpu::BufferUsage::CopySrc);
 
-        wgpu::Texture dstTexture = CreateVideoTextureForTest(
-            wgpu::TextureFormat::R8BG8Biplanar420Unorm, wgpu::TextureUsage::TextureBinding);
+    wgpu::Texture dstTexture = CreateVideoTextureForTest(wgpu::TextureFormat::R8BG8Biplanar420Unorm,
+                                                         wgpu::TextureUsage::TextureBinding);
 
-        wgpu::ImageCopyBuffer copySrc = utils::CreateImageCopyBuffer(srcBuffer, 0, 12, 4);
+    wgpu::ImageCopyBuffer copySrc = utils::CreateImageCopyBuffer(srcBuffer, 0, 12, 4);
 
-        wgpu::ImageCopyTexture copyDst = utils::CreateImageCopyTexture(dstTexture, 0, {0, 0, 0});
+    wgpu::ImageCopyTexture copyDst = utils::CreateImageCopyTexture(dstTexture, 0, {0, 0, 0});
 
-        wgpu::Extent3D copySize = {1, 1, 1};
+    wgpu::Extent3D copySize = {1, 1, 1};
 
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+    encoder.CopyBufferToTexture(&copySrc, &copyDst, &copySize);
+    ASSERT_DEVICE_ERROR(encoder.Finish());
+}
+
+// Test copying from a buffer to a multi-planar format per plane fails.
+TEST_F(VideoViewsValidation, B2TCopyPlaneAspectsFails) {
+    std::vector<uint8_t> placeholderData(4, 0);
+
+    wgpu::Buffer srcBuffer = utils::CreateBufferFromData(
+        device, placeholderData.data(), placeholderData.size(), wgpu::BufferUsage::CopySrc);
+
+    wgpu::Texture dstTexture = CreateVideoTextureForTest(wgpu::TextureFormat::R8BG8Biplanar420Unorm,
+                                                         wgpu::TextureUsage::TextureBinding);
+
+    wgpu::ImageCopyBuffer copySrc = utils::CreateImageCopyBuffer(srcBuffer, 0, 12, 4);
+
+    wgpu::ImageCopyTexture copyDst =
+        utils::CreateImageCopyTexture(dstTexture, 0, {0, 0, 0}, wgpu::TextureAspect::Plane0Only);
+
+    wgpu::Extent3D copySize = {1, 1, 1};
+
+    {
         wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
         encoder.CopyBufferToTexture(&copySrc, &copyDst, &copySize);
         ASSERT_DEVICE_ERROR(encoder.Finish());
     }
 
-    // Test copying from a buffer to a multi-planar format per plane fails.
-    TEST_F(VideoViewsValidation, B2TCopyPlaneAspectsFails) {
-        std::vector<uint8_t> placeholderData(4, 0);
+    copyDst =
+        utils::CreateImageCopyTexture(dstTexture, 0, {0, 0, 0}, wgpu::TextureAspect::Plane1Only);
 
-        wgpu::Buffer srcBuffer = utils::CreateBufferFromData(
-            device, placeholderData.data(), placeholderData.size(), wgpu::BufferUsage::CopySrc);
-
-        wgpu::Texture dstTexture = CreateVideoTextureForTest(
-            wgpu::TextureFormat::R8BG8Biplanar420Unorm, wgpu::TextureUsage::TextureBinding);
-
-        wgpu::ImageCopyBuffer copySrc = utils::CreateImageCopyBuffer(srcBuffer, 0, 12, 4);
-
-        wgpu::ImageCopyTexture copyDst = utils::CreateImageCopyTexture(
-            dstTexture, 0, {0, 0, 0}, wgpu::TextureAspect::Plane0Only);
-
-        wgpu::Extent3D copySize = {1, 1, 1};
-
-        {
-            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-            encoder.CopyBufferToTexture(&copySrc, &copyDst, &copySize);
-            ASSERT_DEVICE_ERROR(encoder.Finish());
-        }
-
-        copyDst = utils::CreateImageCopyTexture(dstTexture, 0, {0, 0, 0},
-                                                wgpu::TextureAspect::Plane1Only);
-
-        {
-            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-            encoder.CopyBufferToTexture(&copySrc, &copyDst, &copySize);
-            ASSERT_DEVICE_ERROR(encoder.Finish());
-        }
+    {
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        encoder.CopyBufferToTexture(&copySrc, &copyDst, &copySize);
+        ASSERT_DEVICE_ERROR(encoder.Finish());
     }
+}
 
-    // Tests which multi-planar formats are allowed to be sampled.
-    TEST_F(VideoViewsValidation, SamplingMultiPlanarTexture) {
-        wgpu::BindGroupLayout layout = utils::MakeBindGroupLayout(
-            device, {{0, wgpu::ShaderStage::Fragment, wgpu::TextureSampleType::Float}});
+// Tests which multi-planar formats are allowed to be sampled.
+TEST_F(VideoViewsValidation, SamplingMultiPlanarTexture) {
+    wgpu::BindGroupLayout layout = utils::MakeBindGroupLayout(
+        device, {{0, wgpu::ShaderStage::Fragment, wgpu::TextureSampleType::Float}});
 
-        // R8BG8Biplanar420Unorm is allowed to be sampled, if plane 0 or plane 1 is selected.
-        wgpu::Texture texture = CreateVideoTextureForTest(
-            wgpu::TextureFormat::R8BG8Biplanar420Unorm, wgpu::TextureUsage::TextureBinding);
+    // R8BG8Biplanar420Unorm is allowed to be sampled, if plane 0 or plane 1 is selected.
+    wgpu::Texture texture = CreateVideoTextureForTest(wgpu::TextureFormat::R8BG8Biplanar420Unorm,
+                                                      wgpu::TextureUsage::TextureBinding);
 
-        wgpu::TextureViewDescriptor desc = {};
+    wgpu::TextureViewDescriptor desc = {};
 
-        desc.aspect = wgpu::TextureAspect::Plane0Only;
-        utils::MakeBindGroup(device, layout, {{0, texture.CreateView(&desc)}});
+    desc.aspect = wgpu::TextureAspect::Plane0Only;
+    utils::MakeBindGroup(device, layout, {{0, texture.CreateView(&desc)}});
 
-        desc.aspect = wgpu::TextureAspect::Plane1Only;
-        utils::MakeBindGroup(device, layout, {{0, texture.CreateView(&desc)}});
-    }
+    desc.aspect = wgpu::TextureAspect::Plane1Only;
+    utils::MakeBindGroup(device, layout, {{0, texture.CreateView(&desc)}});
+}
 
-    // Tests creating a texture with a multi-plane format.
-    TEST_F(VideoViewsValidation, CreateTextureFails) {
-        // multi-planar formats are NOT allowed to be renderable.
-        ASSERT_DEVICE_ERROR(CreateVideoTextureForTest(wgpu::TextureFormat::R8BG8Biplanar420Unorm,
-                                                      wgpu::TextureUsage::RenderAttachment));
-    }
+// Tests creating a texture with a multi-plane format.
+TEST_F(VideoViewsValidation, CreateTextureFails) {
+    // multi-planar formats are NOT allowed to be renderable.
+    ASSERT_DEVICE_ERROR(CreateVideoTextureForTest(wgpu::TextureFormat::R8BG8Biplanar420Unorm,
+                                                  wgpu::TextureUsage::RenderAttachment));
+}
 
-    // Tests writing into a multi-planar format fails.
-    TEST_F(VideoViewsValidation, WriteTextureAllAspectsFails) {
-        wgpu::Texture texture = CreateVideoTextureForTest(
-            wgpu::TextureFormat::R8BG8Biplanar420Unorm, wgpu::TextureUsage::TextureBinding);
+// Tests writing into a multi-planar format fails.
+TEST_F(VideoViewsValidation, WriteTextureAllAspectsFails) {
+    wgpu::Texture texture = CreateVideoTextureForTest(wgpu::TextureFormat::R8BG8Biplanar420Unorm,
+                                                      wgpu::TextureUsage::TextureBinding);
 
-        wgpu::TextureDataLayout textureDataLayout = utils::CreateTextureDataLayout(0, 4, 4);
+    wgpu::TextureDataLayout textureDataLayout = utils::CreateTextureDataLayout(0, 4, 4);
 
-        wgpu::ImageCopyTexture imageCopyTexture =
-            utils::CreateImageCopyTexture(texture, 0, {0, 0, 0});
+    wgpu::ImageCopyTexture imageCopyTexture = utils::CreateImageCopyTexture(texture, 0, {0, 0, 0});
 
-        std::vector<uint8_t> placeholderData(4, 0);
-        wgpu::Extent3D writeSize = {1, 1, 1};
+    std::vector<uint8_t> placeholderData(4, 0);
+    wgpu::Extent3D writeSize = {1, 1, 1};
 
-        wgpu::Queue queue = device.GetQueue();
+    wgpu::Queue queue = device.GetQueue();
 
-        ASSERT_DEVICE_ERROR(queue.WriteTexture(&imageCopyTexture, placeholderData.data(),
-                                               placeholderData.size(), &textureDataLayout,
-                                               &writeSize));
-    }
+    ASSERT_DEVICE_ERROR(queue.WriteTexture(&imageCopyTexture, placeholderData.data(),
+                                           placeholderData.size(), &textureDataLayout, &writeSize));
+}
 
-    // Tests writing into a multi-planar format per plane fails.
-    TEST_F(VideoViewsValidation, WriteTexturePlaneAspectsFails) {
-        wgpu::Texture texture = CreateVideoTextureForTest(
-            wgpu::TextureFormat::R8BG8Biplanar420Unorm, wgpu::TextureUsage::TextureBinding);
+// Tests writing into a multi-planar format per plane fails.
+TEST_F(VideoViewsValidation, WriteTexturePlaneAspectsFails) {
+    wgpu::Texture texture = CreateVideoTextureForTest(wgpu::TextureFormat::R8BG8Biplanar420Unorm,
+                                                      wgpu::TextureUsage::TextureBinding);
 
-        wgpu::TextureDataLayout textureDataLayout = utils::CreateTextureDataLayout(0, 12, 4);
-        wgpu::ImageCopyTexture imageCopyTexture =
-            utils::CreateImageCopyTexture(texture, 0, {0, 0, 0}, wgpu::TextureAspect::Plane0Only);
+    wgpu::TextureDataLayout textureDataLayout = utils::CreateTextureDataLayout(0, 12, 4);
+    wgpu::ImageCopyTexture imageCopyTexture =
+        utils::CreateImageCopyTexture(texture, 0, {0, 0, 0}, wgpu::TextureAspect::Plane0Only);
 
-        std::vector<uint8_t> placeholderData(4, 0);
-        wgpu::Extent3D writeSize = {1, 1, 1};
+    std::vector<uint8_t> placeholderData(4, 0);
+    wgpu::Extent3D writeSize = {1, 1, 1};
 
-        wgpu::Queue queue = device.GetQueue();
+    wgpu::Queue queue = device.GetQueue();
 
-        ASSERT_DEVICE_ERROR(queue.WriteTexture(&imageCopyTexture, placeholderData.data(),
-                                               placeholderData.size(), &textureDataLayout,
-                                               &writeSize));
-    }
+    ASSERT_DEVICE_ERROR(queue.WriteTexture(&imageCopyTexture, placeholderData.data(),
+                                           placeholderData.size(), &textureDataLayout, &writeSize));
+}
 
 }  // anonymous namespace
diff --git a/src/dawn/tests/unittests/validation/WriteBufferTests.cpp b/src/dawn/tests/unittests/validation/WriteBufferTests.cpp
index a3a5f89..6876854 100644
--- a/src/dawn/tests/unittests/validation/WriteBufferTests.cpp
+++ b/src/dawn/tests/unittests/validation/WriteBufferTests.cpp
@@ -20,86 +20,86 @@
 
 namespace {
 
-    class WriteBufferTest : public ValidationTest {
-      public:
-        wgpu::Buffer CreateWritableBuffer(uint64_t size) {
-            wgpu::BufferDescriptor desc;
-            desc.usage = wgpu::BufferUsage::CopyDst;
-            desc.size = size;
-            return device.CreateBuffer(&desc);
-        }
-
-        wgpu::CommandBuffer EncodeWriteBuffer(wgpu::Buffer buffer,
-                                              uint64_t bufferOffset,
-                                              uint64_t size) {
-            std::vector<uint8_t> data(size);
-            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-            encoder.WriteBuffer(buffer, bufferOffset, data.data(), size);
-            return encoder.Finish();
-        }
-    };
-
-    // Tests that the buffer offset is validated to be a multiple of 4 bytes.
-    TEST_F(WriteBufferTest, OffsetAlignment) {
-        wgpu::Buffer buffer = CreateWritableBuffer(64);
-        EncodeWriteBuffer(buffer, 0, 4);
-        EncodeWriteBuffer(buffer, 4, 4);
-        EncodeWriteBuffer(buffer, 60, 4);
-        ASSERT_DEVICE_ERROR(EncodeWriteBuffer(buffer, 1, 4));
-        ASSERT_DEVICE_ERROR(EncodeWriteBuffer(buffer, 2, 4));
-        ASSERT_DEVICE_ERROR(EncodeWriteBuffer(buffer, 3, 4));
-        ASSERT_DEVICE_ERROR(EncodeWriteBuffer(buffer, 5, 4));
-        ASSERT_DEVICE_ERROR(EncodeWriteBuffer(buffer, 11, 4));
-    }
-
-    // Tests that the buffer size is validated to be a multiple of 4 bytes.
-    TEST_F(WriteBufferTest, SizeAlignment) {
-        wgpu::Buffer buffer = CreateWritableBuffer(64);
-        EncodeWriteBuffer(buffer, 0, 64);
-        EncodeWriteBuffer(buffer, 4, 60);
-        EncodeWriteBuffer(buffer, 40, 24);
-        ASSERT_DEVICE_ERROR(EncodeWriteBuffer(buffer, 0, 63));
-        ASSERT_DEVICE_ERROR(EncodeWriteBuffer(buffer, 4, 1));
-        ASSERT_DEVICE_ERROR(EncodeWriteBuffer(buffer, 4, 2));
-        ASSERT_DEVICE_ERROR(EncodeWriteBuffer(buffer, 40, 23));
-    }
-
-    // Tests that the buffer size and offset are validated to fit within the bounds of the buffer.
-    TEST_F(WriteBufferTest, BufferBounds) {
-        wgpu::Buffer buffer = CreateWritableBuffer(64);
-        EncodeWriteBuffer(buffer, 0, 64);
-        EncodeWriteBuffer(buffer, 4, 60);
-        EncodeWriteBuffer(buffer, 40, 24);
-        ASSERT_DEVICE_ERROR(EncodeWriteBuffer(buffer, 0, 68));
-        ASSERT_DEVICE_ERROR(EncodeWriteBuffer(buffer, 4, 64));
-        ASSERT_DEVICE_ERROR(EncodeWriteBuffer(buffer, 60, 8));
-        ASSERT_DEVICE_ERROR(EncodeWriteBuffer(buffer, 64, 4));
-    }
-
-    // Tests that the destination buffer's usage is validated to contain CopyDst.
-    TEST_F(WriteBufferTest, RequireCopyDstUsage) {
-        wgpu::BufferDescriptor desc;
-        desc.usage = wgpu::BufferUsage::CopySrc;
-        desc.size = 64;
-        wgpu::Buffer buffer = device.CreateBuffer(&desc);
-
-        ASSERT_DEVICE_ERROR(EncodeWriteBuffer(buffer, 0, 64));
-    }
-
-    // Tests that the destination buffer's state is validated at submission.
-    TEST_F(WriteBufferTest, ValidBufferState) {
+class WriteBufferTest : public ValidationTest {
+  public:
+    wgpu::Buffer CreateWritableBuffer(uint64_t size) {
         wgpu::BufferDescriptor desc;
         desc.usage = wgpu::BufferUsage::CopyDst;
-        desc.size = 64;
-        desc.mappedAtCreation = true;
-        wgpu::Buffer buffer = device.CreateBuffer(&desc);
-
-        wgpu::CommandBuffer commands = EncodeWriteBuffer(buffer, 0, 64);
-        ASSERT_DEVICE_ERROR(device.GetQueue().Submit(1, &commands));
-
-        commands = EncodeWriteBuffer(buffer, 0, 64);
-        buffer.Unmap();
-        device.GetQueue().Submit(1, &commands);
+        desc.size = size;
+        return device.CreateBuffer(&desc);
     }
 
+    wgpu::CommandBuffer EncodeWriteBuffer(wgpu::Buffer buffer,
+                                          uint64_t bufferOffset,
+                                          uint64_t size) {
+        std::vector<uint8_t> data(size);
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        encoder.WriteBuffer(buffer, bufferOffset, data.data(), size);
+        return encoder.Finish();
+    }
+};
+
+// Tests that the buffer offset is validated to be a multiple of 4 bytes.
+TEST_F(WriteBufferTest, OffsetAlignment) {
+    wgpu::Buffer buffer = CreateWritableBuffer(64);
+    EncodeWriteBuffer(buffer, 0, 4);
+    EncodeWriteBuffer(buffer, 4, 4);
+    EncodeWriteBuffer(buffer, 60, 4);
+    ASSERT_DEVICE_ERROR(EncodeWriteBuffer(buffer, 1, 4));
+    ASSERT_DEVICE_ERROR(EncodeWriteBuffer(buffer, 2, 4));
+    ASSERT_DEVICE_ERROR(EncodeWriteBuffer(buffer, 3, 4));
+    ASSERT_DEVICE_ERROR(EncodeWriteBuffer(buffer, 5, 4));
+    ASSERT_DEVICE_ERROR(EncodeWriteBuffer(buffer, 11, 4));
+}
+
+// Tests that the buffer size is validated to be a multiple of 4 bytes.
+TEST_F(WriteBufferTest, SizeAlignment) {
+    wgpu::Buffer buffer = CreateWritableBuffer(64);
+    EncodeWriteBuffer(buffer, 0, 64);
+    EncodeWriteBuffer(buffer, 4, 60);
+    EncodeWriteBuffer(buffer, 40, 24);
+    ASSERT_DEVICE_ERROR(EncodeWriteBuffer(buffer, 0, 63));
+    ASSERT_DEVICE_ERROR(EncodeWriteBuffer(buffer, 4, 1));
+    ASSERT_DEVICE_ERROR(EncodeWriteBuffer(buffer, 4, 2));
+    ASSERT_DEVICE_ERROR(EncodeWriteBuffer(buffer, 40, 23));
+}
+
+// Tests that the buffer size and offset are validated to fit within the bounds of the buffer.
+TEST_F(WriteBufferTest, BufferBounds) {
+    wgpu::Buffer buffer = CreateWritableBuffer(64);
+    EncodeWriteBuffer(buffer, 0, 64);
+    EncodeWriteBuffer(buffer, 4, 60);
+    EncodeWriteBuffer(buffer, 40, 24);
+    ASSERT_DEVICE_ERROR(EncodeWriteBuffer(buffer, 0, 68));
+    ASSERT_DEVICE_ERROR(EncodeWriteBuffer(buffer, 4, 64));
+    ASSERT_DEVICE_ERROR(EncodeWriteBuffer(buffer, 60, 8));
+    ASSERT_DEVICE_ERROR(EncodeWriteBuffer(buffer, 64, 4));
+}
+
+// Tests that the destination buffer's usage is validated to contain CopyDst.
+TEST_F(WriteBufferTest, RequireCopyDstUsage) {
+    wgpu::BufferDescriptor desc;
+    desc.usage = wgpu::BufferUsage::CopySrc;
+    desc.size = 64;
+    wgpu::Buffer buffer = device.CreateBuffer(&desc);
+
+    ASSERT_DEVICE_ERROR(EncodeWriteBuffer(buffer, 0, 64));
+}
+
+// Tests that the destination buffer's state is validated at submission.
+TEST_F(WriteBufferTest, ValidBufferState) {
+    wgpu::BufferDescriptor desc;
+    desc.usage = wgpu::BufferUsage::CopyDst;
+    desc.size = 64;
+    desc.mappedAtCreation = true;
+    wgpu::Buffer buffer = device.CreateBuffer(&desc);
+
+    wgpu::CommandBuffer commands = EncodeWriteBuffer(buffer, 0, 64);
+    ASSERT_DEVICE_ERROR(device.GetQueue().Submit(1, &commands));
+
+    commands = EncodeWriteBuffer(buffer, 0, 64);
+    buffer.Unmap();
+    device.GetQueue().Submit(1, &commands);
+}
+
 }  // namespace
diff --git a/src/dawn/tests/unittests/wire/WireAdapterTests.cpp b/src/dawn/tests/unittests/wire/WireAdapterTests.cpp
index 72bda9f..aa56c39 100644
--- a/src/dawn/tests/unittests/wire/WireAdapterTests.cpp
+++ b/src/dawn/tests/unittests/wire/WireAdapterTests.cpp
@@ -23,318 +23,317 @@
 
 #include "webgpu/webgpu_cpp.h"
 
-namespace dawn::wire { namespace {
+namespace dawn::wire {
+namespace {
 
-    using testing::_;
-    using testing::Invoke;
-    using testing::InvokeWithoutArgs;
-    using testing::MockCallback;
-    using testing::NotNull;
-    using testing::Return;
-    using testing::SaveArg;
-    using testing::StrEq;
-    using testing::WithArg;
+using testing::_;
+using testing::Invoke;
+using testing::InvokeWithoutArgs;
+using testing::MockCallback;
+using testing::NotNull;
+using testing::Return;
+using testing::SaveArg;
+using testing::StrEq;
+using testing::WithArg;
 
-    class WireAdapterTests : public WireTest {
-      protected:
-        // Bootstrap the tests and create a fake adapter.
-        void SetUp() override {
-            WireTest::SetUp();
+class WireAdapterTests : public WireTest {
+  protected:
+    // Bootstrap the tests and create a fake adapter.
+    void SetUp() override {
+        WireTest::SetUp();
 
-            auto reservation = GetWireClient()->ReserveInstance();
-            instance = wgpu::Instance::Acquire(reservation.instance);
+        auto reservation = GetWireClient()->ReserveInstance();
+        instance = wgpu::Instance::Acquire(reservation.instance);
 
-            WGPUInstance apiInstance = api.GetNewInstance();
-            EXPECT_CALL(api, InstanceReference(apiInstance));
-            EXPECT_TRUE(GetWireServer()->InjectInstance(apiInstance, reservation.id,
-                                                        reservation.generation));
+        WGPUInstance apiInstance = api.GetNewInstance();
+        EXPECT_CALL(api, InstanceReference(apiInstance));
+        EXPECT_TRUE(
+            GetWireServer()->InjectInstance(apiInstance, reservation.id, reservation.generation));
 
-            wgpu::RequestAdapterOptions options = {};
-            MockCallback<WGPURequestAdapterCallback> cb;
-            auto* userdata = cb.MakeUserdata(this);
-            instance.RequestAdapter(&options, cb.Callback(), userdata);
-
-            // Expect the server to receive the message. Then, mock a fake reply.
-            apiAdapter = api.GetNewAdapter();
-            EXPECT_CALL(api, OnInstanceRequestAdapter(apiInstance, NotNull(), NotNull(), NotNull()))
-                .WillOnce(InvokeWithoutArgs([&]() {
-                    EXPECT_CALL(api, AdapterGetProperties(apiAdapter, NotNull()))
-                        .WillOnce(WithArg<1>(Invoke([&](WGPUAdapterProperties* properties) {
-                            *properties = {};
-                            properties->name = "";
-                            properties->driverDescription = "";
-                        })));
-
-                    EXPECT_CALL(api, AdapterGetLimits(apiAdapter, NotNull()))
-                        .WillOnce(WithArg<1>(Invoke([&](WGPUSupportedLimits* limits) {
-                            *limits = {};
-                            return true;
-                        })));
-
-                    EXPECT_CALL(api, AdapterEnumerateFeatures(apiAdapter, nullptr))
-                        .WillOnce(Return(0))
-                        .WillOnce(Return(0));
-                    api.CallInstanceRequestAdapterCallback(
-                        apiInstance, WGPURequestAdapterStatus_Success, apiAdapter, nullptr);
-                }));
-            FlushClient();
-
-            // Expect the callback in the client.
-            WGPUAdapter cAdapter;
-            EXPECT_CALL(cb, Call(WGPURequestAdapterStatus_Success, NotNull(), nullptr, this))
-                .WillOnce(SaveArg<1>(&cAdapter));
-            FlushServer();
-
-            EXPECT_NE(cAdapter, nullptr);
-            adapter = wgpu::Adapter::Acquire(cAdapter);
-        }
-
-        void TearDown() override {
-            adapter = nullptr;
-            instance = nullptr;
-            WireTest::TearDown();
-        }
-
-        WGPUAdapter apiAdapter;
-        wgpu::Instance instance;
-        wgpu::Adapter adapter;
-    };
-
-    // Test that the DeviceDescriptor is passed from the client to the server.
-    TEST_F(WireAdapterTests, RequestDevicePassesDescriptor) {
-        MockCallback<WGPURequestDeviceCallback> cb;
+        wgpu::RequestAdapterOptions options = {};
+        MockCallback<WGPURequestAdapterCallback> cb;
         auto* userdata = cb.MakeUserdata(this);
-
-        // Test an empty descriptor
-        {
-            wgpu::DeviceDescriptor desc = {};
-            adapter.RequestDevice(&desc, cb.Callback(), userdata);
-
-            EXPECT_CALL(api, OnAdapterRequestDevice(apiAdapter, NotNull(), NotNull(), NotNull()))
-                .WillOnce(WithArg<1>(Invoke([](const WGPUDeviceDescriptor* apiDesc) {
-                    EXPECT_EQ(apiDesc->label, nullptr);
-                    EXPECT_EQ(apiDesc->requiredFeaturesCount, 0u);
-                    EXPECT_EQ(apiDesc->requiredLimits, nullptr);
-                })));
-            FlushClient();
-        }
-
-        // Test a non-empty descriptor
-        {
-            wgpu::RequiredLimits limits = {};
-            limits.limits.maxStorageTexturesPerShaderStage = 5;
-
-            std::vector<wgpu::FeatureName> features = {wgpu::FeatureName::TextureCompressionETC2,
-                                                       wgpu::FeatureName::TextureCompressionASTC};
-
-            wgpu::DeviceDescriptor desc = {};
-            desc.label = "hello device";
-            desc.requiredLimits = &limits;
-            desc.requiredFeaturesCount = features.size();
-            desc.requiredFeatures = features.data();
-
-            adapter.RequestDevice(&desc, cb.Callback(), userdata);
-
-            EXPECT_CALL(api, OnAdapterRequestDevice(apiAdapter, NotNull(), NotNull(), NotNull()))
-                .WillOnce(WithArg<1>(Invoke([&](const WGPUDeviceDescriptor* apiDesc) {
-                    EXPECT_STREQ(apiDesc->label, desc.label);
-
-                    ASSERT_EQ(apiDesc->requiredFeaturesCount, features.size());
-                    for (uint32_t i = 0; i < features.size(); ++i) {
-                        EXPECT_EQ(apiDesc->requiredFeatures[i],
-                                  static_cast<WGPUFeatureName>(features[i]));
-                    }
-
-                    ASSERT_NE(apiDesc->requiredLimits, nullptr);
-                    EXPECT_EQ(apiDesc->requiredLimits->nextInChain, nullptr);
-                    EXPECT_EQ(apiDesc->requiredLimits->limits.maxStorageTexturesPerShaderStage,
-                              limits.limits.maxStorageTexturesPerShaderStage);
-                })));
-            FlushClient();
-        }
-
-        // Delete the adapter now, or it'll call the mock callback after it's deleted.
-        adapter = nullptr;
-    }
-
-    // Test that RequestDevice forwards the device information to the client.
-    TEST_F(WireAdapterTests, RequestDeviceSuccess) {
-        MockCallback<WGPURequestDeviceCallback> cb;
-        auto* userdata = cb.MakeUserdata(this);
-
-        wgpu::SupportedLimits fakeLimits = {};
-        fakeLimits.limits.maxTextureDimension1D = 433;
-        fakeLimits.limits.maxVertexAttributes = 1243;
-
-        std::initializer_list<wgpu::FeatureName> fakeFeatures = {
-            wgpu::FeatureName::Depth32FloatStencil8,
-            wgpu::FeatureName::TextureCompressionBC,
-        };
-
-        wgpu::DeviceDescriptor desc = {};
-        adapter.RequestDevice(&desc, cb.Callback(), userdata);
+        instance.RequestAdapter(&options, cb.Callback(), userdata);
 
         // Expect the server to receive the message. Then, mock a fake reply.
-        WGPUDevice apiDevice = api.GetNewDevice();
-        EXPECT_CALL(api, OnAdapterRequestDevice(apiAdapter, NotNull(), NotNull(), NotNull()))
+        apiAdapter = api.GetNewAdapter();
+        EXPECT_CALL(api, OnInstanceRequestAdapter(apiInstance, NotNull(), NotNull(), NotNull()))
             .WillOnce(InvokeWithoutArgs([&]() {
-                // Set on device creation to forward callbacks to the client.
-                EXPECT_CALL(api,
-                            OnDeviceSetUncapturedErrorCallback(apiDevice, NotNull(), NotNull()))
-                    .Times(1);
-                EXPECT_CALL(api, OnDeviceSetLoggingCallback(apiDevice, NotNull(), NotNull()))
-                    .Times(1);
-                EXPECT_CALL(api, OnDeviceSetDeviceLostCallback(apiDevice, NotNull(), NotNull()))
-                    .Times(1);
+                EXPECT_CALL(api, AdapterGetProperties(apiAdapter, NotNull()))
+                    .WillOnce(WithArg<1>(Invoke([&](WGPUAdapterProperties* properties) {
+                        *properties = {};
+                        properties->name = "";
+                        properties->driverDescription = "";
+                    })));
 
-                EXPECT_CALL(api, DeviceGetLimits(apiDevice, NotNull()))
+                EXPECT_CALL(api, AdapterGetLimits(apiAdapter, NotNull()))
                     .WillOnce(WithArg<1>(Invoke([&](WGPUSupportedLimits* limits) {
-                        *reinterpret_cast<wgpu::SupportedLimits*>(limits) = fakeLimits;
+                        *limits = {};
                         return true;
                     })));
 
-                EXPECT_CALL(api, DeviceEnumerateFeatures(apiDevice, nullptr))
-                    .WillOnce(Return(fakeFeatures.size()));
-
-                EXPECT_CALL(api, DeviceEnumerateFeatures(apiDevice, NotNull()))
-                    .WillOnce(WithArg<1>(Invoke([&](WGPUFeatureName* features) {
-                        for (wgpu::FeatureName feature : fakeFeatures) {
-                            *(features++) = static_cast<WGPUFeatureName>(feature);
-                        }
-                        return fakeFeatures.size();
-                    })));
-
-                api.CallAdapterRequestDeviceCallback(apiAdapter, WGPURequestDeviceStatus_Success,
-                                                     apiDevice, nullptr);
-            }));
-        FlushClient();
-
-        // Expect the callback in the client and all the device information to match.
-        EXPECT_CALL(cb, Call(WGPURequestDeviceStatus_Success, NotNull(), nullptr, this))
-            .WillOnce(WithArg<1>(Invoke([&](WGPUDevice cDevice) {
-                wgpu::Device device = wgpu::Device::Acquire(cDevice);
-
-                wgpu::SupportedLimits limits;
-                EXPECT_TRUE(device.GetLimits(&limits));
-                EXPECT_EQ(limits.limits.maxTextureDimension1D,
-                          fakeLimits.limits.maxTextureDimension1D);
-                EXPECT_EQ(limits.limits.maxVertexAttributes, fakeLimits.limits.maxVertexAttributes);
-
-                std::vector<wgpu::FeatureName> features;
-                features.resize(device.EnumerateFeatures(nullptr));
-                ASSERT_EQ(features.size(), fakeFeatures.size());
-                EXPECT_EQ(device.EnumerateFeatures(&features[0]), features.size());
-
-                std::unordered_set<wgpu::FeatureName> featureSet(fakeFeatures);
-                for (wgpu::FeatureName feature : features) {
-                    EXPECT_EQ(featureSet.erase(feature), 1u);
-                }
-            })));
-        FlushServer();
-
-        // Cleared when the device is destroyed.
-        EXPECT_CALL(api, OnDeviceSetUncapturedErrorCallback(apiDevice, nullptr, nullptr)).Times(1);
-        EXPECT_CALL(api, OnDeviceSetLoggingCallback(apiDevice, nullptr, nullptr)).Times(1);
-        EXPECT_CALL(api, OnDeviceSetDeviceLostCallback(apiDevice, nullptr, nullptr)).Times(1);
-    }
-
-    // Test that features requested that the implementation supports, but not the
-    // wire reject the callback.
-    TEST_F(WireAdapterTests, RequestFeatureUnsupportedByWire) {
-        MockCallback<WGPURequestDeviceCallback> cb;
-        auto* userdata = cb.MakeUserdata(this);
-
-        std::initializer_list<wgpu::FeatureName> fakeFeatures = {
-            // Some value that is not a valid feature
-            static_cast<wgpu::FeatureName>(-2),
-            wgpu::FeatureName::TextureCompressionASTC,
-        };
-
-        wgpu::DeviceDescriptor desc = {};
-        adapter.RequestDevice(&desc, cb.Callback(), userdata);
-
-        // Expect the server to receive the message. Then, mock a fake reply.
-        // The reply contains features that the device implementation supports, but the
-        // wire does not.
-        WGPUDevice apiDevice = api.GetNewDevice();
-        EXPECT_CALL(api, OnAdapterRequestDevice(apiAdapter, NotNull(), NotNull(), NotNull()))
-            .WillOnce(InvokeWithoutArgs([&]() {
-                EXPECT_CALL(api, DeviceEnumerateFeatures(apiDevice, nullptr))
-                    .WillOnce(Return(fakeFeatures.size()));
-
-                EXPECT_CALL(api, DeviceEnumerateFeatures(apiDevice, NotNull()))
-                    .WillOnce(WithArg<1>(Invoke([&](WGPUFeatureName* features) {
-                        for (wgpu::FeatureName feature : fakeFeatures) {
-                            *(features++) = static_cast<WGPUFeatureName>(feature);
-                        }
-                        return fakeFeatures.size();
-                    })));
-
-                // The device was actually created, but the wire didn't support its features.
-                // Expect it to be released.
-                EXPECT_CALL(api, DeviceRelease(apiDevice));
-
-                // Fake successful creation. The client still receives a failure due to
-                // unsupported features.
-                api.CallAdapterRequestDeviceCallback(apiAdapter, WGPURequestDeviceStatus_Success,
-                                                     apiDevice, nullptr);
-            }));
-        FlushClient();
-
-        // Expect an error callback since the feature is not supported.
-        EXPECT_CALL(cb, Call(WGPURequestDeviceStatus_Error, nullptr, NotNull(), this)).Times(1);
-        FlushServer();
-    }
-
-    // Test that RequestDevice errors forward to the client.
-    TEST_F(WireAdapterTests, RequestDeviceError) {
-        MockCallback<WGPURequestDeviceCallback> cb;
-        auto* userdata = cb.MakeUserdata(this);
-
-        wgpu::DeviceDescriptor desc = {};
-        adapter.RequestDevice(&desc, cb.Callback(), userdata);
-
-        // Expect the server to receive the message. Then, mock an error.
-        EXPECT_CALL(api, OnAdapterRequestDevice(apiAdapter, NotNull(), NotNull(), NotNull()))
-            .WillOnce(InvokeWithoutArgs([&]() {
-                api.CallAdapterRequestDeviceCallback(apiAdapter, WGPURequestDeviceStatus_Error,
-                                                     nullptr, "Request device failed");
+                EXPECT_CALL(api, AdapterEnumerateFeatures(apiAdapter, nullptr))
+                    .WillOnce(Return(0))
+                    .WillOnce(Return(0));
+                api.CallInstanceRequestAdapterCallback(
+                    apiInstance, WGPURequestAdapterStatus_Success, apiAdapter, nullptr);
             }));
         FlushClient();
 
         // Expect the callback in the client.
-        EXPECT_CALL(
-            cb, Call(WGPURequestDeviceStatus_Error, nullptr, StrEq("Request device failed"), this))
-            .Times(1);
+        WGPUAdapter cAdapter;
+        EXPECT_CALL(cb, Call(WGPURequestAdapterStatus_Success, NotNull(), nullptr, this))
+            .WillOnce(SaveArg<1>(&cAdapter));
         FlushServer();
+
+        EXPECT_NE(cAdapter, nullptr);
+        adapter = wgpu::Adapter::Acquire(cAdapter);
     }
 
-    // Test that RequestDevice receives unknown status if the adapter is deleted
-    // before the callback happens.
-    TEST_F(WireAdapterTests, RequestDeviceAdapterDestroyedBeforeCallback) {
-        MockCallback<WGPURequestDeviceCallback> cb;
-        auto* userdata = cb.MakeUserdata(this);
-
-        wgpu::DeviceDescriptor desc = {};
-        adapter.RequestDevice(&desc, cb.Callback(), userdata);
-
-        EXPECT_CALL(cb, Call(WGPURequestDeviceStatus_Unknown, nullptr, NotNull(), this)).Times(1);
+    void TearDown() override {
         adapter = nullptr;
+        instance = nullptr;
+        WireTest::TearDown();
     }
 
-    // Test that RequestDevice receives unknown status if the wire is disconnected
-    // before the callback happens.
-    TEST_F(WireAdapterTests, RequestDeviceWireDisconnectedBeforeCallback) {
-        MockCallback<WGPURequestDeviceCallback> cb;
-        auto* userdata = cb.MakeUserdata(this);
+    WGPUAdapter apiAdapter;
+    wgpu::Instance instance;
+    wgpu::Adapter adapter;
+};
 
+// Test that the DeviceDescriptor is passed from the client to the server.
+TEST_F(WireAdapterTests, RequestDevicePassesDescriptor) {
+    MockCallback<WGPURequestDeviceCallback> cb;
+    auto* userdata = cb.MakeUserdata(this);
+
+    // Test an empty descriptor
+    {
         wgpu::DeviceDescriptor desc = {};
         adapter.RequestDevice(&desc, cb.Callback(), userdata);
 
-        EXPECT_CALL(cb, Call(WGPURequestDeviceStatus_Unknown, nullptr, NotNull(), this)).Times(1);
-        GetWireClient()->Disconnect();
+        EXPECT_CALL(api, OnAdapterRequestDevice(apiAdapter, NotNull(), NotNull(), NotNull()))
+            .WillOnce(WithArg<1>(Invoke([](const WGPUDeviceDescriptor* apiDesc) {
+                EXPECT_EQ(apiDesc->label, nullptr);
+                EXPECT_EQ(apiDesc->requiredFeaturesCount, 0u);
+                EXPECT_EQ(apiDesc->requiredLimits, nullptr);
+            })));
+        FlushClient();
     }
 
-    // TODO(https://crbug.com/dawn/1381) Remove when namespaces are not indented.
-    // NOLINTNEXTLINE(readability/namespace)
-}}  // namespace dawn::wire::
+    // Test a non-empty descriptor
+    {
+        wgpu::RequiredLimits limits = {};
+        limits.limits.maxStorageTexturesPerShaderStage = 5;
+
+        std::vector<wgpu::FeatureName> features = {wgpu::FeatureName::TextureCompressionETC2,
+                                                   wgpu::FeatureName::TextureCompressionASTC};
+
+        wgpu::DeviceDescriptor desc = {};
+        desc.label = "hello device";
+        desc.requiredLimits = &limits;
+        desc.requiredFeaturesCount = features.size();
+        desc.requiredFeatures = features.data();
+
+        adapter.RequestDevice(&desc, cb.Callback(), userdata);
+
+        EXPECT_CALL(api, OnAdapterRequestDevice(apiAdapter, NotNull(), NotNull(), NotNull()))
+            .WillOnce(WithArg<1>(Invoke([&](const WGPUDeviceDescriptor* apiDesc) {
+                EXPECT_STREQ(apiDesc->label, desc.label);
+
+                ASSERT_EQ(apiDesc->requiredFeaturesCount, features.size());
+                for (uint32_t i = 0; i < features.size(); ++i) {
+                    EXPECT_EQ(apiDesc->requiredFeatures[i],
+                              static_cast<WGPUFeatureName>(features[i]));
+                }
+
+                ASSERT_NE(apiDesc->requiredLimits, nullptr);
+                EXPECT_EQ(apiDesc->requiredLimits->nextInChain, nullptr);
+                EXPECT_EQ(apiDesc->requiredLimits->limits.maxStorageTexturesPerShaderStage,
+                          limits.limits.maxStorageTexturesPerShaderStage);
+            })));
+        FlushClient();
+    }
+
+    // Delete the adapter now, or it'll call the mock callback after it's deleted.
+    adapter = nullptr;
+}
+
+// Test that RequestDevice forwards the device information to the client.
+TEST_F(WireAdapterTests, RequestDeviceSuccess) {
+    MockCallback<WGPURequestDeviceCallback> cb;
+    auto* userdata = cb.MakeUserdata(this);
+
+    wgpu::SupportedLimits fakeLimits = {};
+    fakeLimits.limits.maxTextureDimension1D = 433;
+    fakeLimits.limits.maxVertexAttributes = 1243;
+
+    std::initializer_list<wgpu::FeatureName> fakeFeatures = {
+        wgpu::FeatureName::Depth32FloatStencil8,
+        wgpu::FeatureName::TextureCompressionBC,
+    };
+
+    wgpu::DeviceDescriptor desc = {};
+    adapter.RequestDevice(&desc, cb.Callback(), userdata);
+
+    // Expect the server to receive the message. Then, mock a fake reply.
+    WGPUDevice apiDevice = api.GetNewDevice();
+    EXPECT_CALL(api, OnAdapterRequestDevice(apiAdapter, NotNull(), NotNull(), NotNull()))
+        .WillOnce(InvokeWithoutArgs([&]() {
+            // Set on device creation to forward callbacks to the client.
+            EXPECT_CALL(api, OnDeviceSetUncapturedErrorCallback(apiDevice, NotNull(), NotNull()))
+                .Times(1);
+            EXPECT_CALL(api, OnDeviceSetLoggingCallback(apiDevice, NotNull(), NotNull())).Times(1);
+            EXPECT_CALL(api, OnDeviceSetDeviceLostCallback(apiDevice, NotNull(), NotNull()))
+                .Times(1);
+
+            EXPECT_CALL(api, DeviceGetLimits(apiDevice, NotNull()))
+                .WillOnce(WithArg<1>(Invoke([&](WGPUSupportedLimits* limits) {
+                    *reinterpret_cast<wgpu::SupportedLimits*>(limits) = fakeLimits;
+                    return true;
+                })));
+
+            EXPECT_CALL(api, DeviceEnumerateFeatures(apiDevice, nullptr))
+                .WillOnce(Return(fakeFeatures.size()));
+
+            EXPECT_CALL(api, DeviceEnumerateFeatures(apiDevice, NotNull()))
+                .WillOnce(WithArg<1>(Invoke([&](WGPUFeatureName* features) {
+                    for (wgpu::FeatureName feature : fakeFeatures) {
+                        *(features++) = static_cast<WGPUFeatureName>(feature);
+                    }
+                    return fakeFeatures.size();
+                })));
+
+            api.CallAdapterRequestDeviceCallback(apiAdapter, WGPURequestDeviceStatus_Success,
+                                                 apiDevice, nullptr);
+        }));
+    FlushClient();
+
+    // Expect the callback in the client and all the device information to match.
+    EXPECT_CALL(cb, Call(WGPURequestDeviceStatus_Success, NotNull(), nullptr, this))
+        .WillOnce(WithArg<1>(Invoke([&](WGPUDevice cDevice) {
+            wgpu::Device device = wgpu::Device::Acquire(cDevice);
+
+            wgpu::SupportedLimits limits;
+            EXPECT_TRUE(device.GetLimits(&limits));
+            EXPECT_EQ(limits.limits.maxTextureDimension1D, fakeLimits.limits.maxTextureDimension1D);
+            EXPECT_EQ(limits.limits.maxVertexAttributes, fakeLimits.limits.maxVertexAttributes);
+
+            std::vector<wgpu::FeatureName> features;
+            features.resize(device.EnumerateFeatures(nullptr));
+            ASSERT_EQ(features.size(), fakeFeatures.size());
+            EXPECT_EQ(device.EnumerateFeatures(&features[0]), features.size());
+
+            std::unordered_set<wgpu::FeatureName> featureSet(fakeFeatures);
+            for (wgpu::FeatureName feature : features) {
+                EXPECT_EQ(featureSet.erase(feature), 1u);
+            }
+        })));
+    FlushServer();
+
+    // Cleared when the device is destroyed.
+    EXPECT_CALL(api, OnDeviceSetUncapturedErrorCallback(apiDevice, nullptr, nullptr)).Times(1);
+    EXPECT_CALL(api, OnDeviceSetLoggingCallback(apiDevice, nullptr, nullptr)).Times(1);
+    EXPECT_CALL(api, OnDeviceSetDeviceLostCallback(apiDevice, nullptr, nullptr)).Times(1);
+}
+
+// Test that requesting features that the implementation supports, but the
+// wire does not, causes the callback to reject the request.
+TEST_F(WireAdapterTests, RequestFeatureUnsupportedByWire) {
+    MockCallback<WGPURequestDeviceCallback> cb;
+    auto* userdata = cb.MakeUserdata(this);
+
+    std::initializer_list<wgpu::FeatureName> fakeFeatures = {
+        // Some value that is not a valid feature
+        static_cast<wgpu::FeatureName>(-2),
+        wgpu::FeatureName::TextureCompressionASTC,
+    };
+
+    wgpu::DeviceDescriptor desc = {};
+    adapter.RequestDevice(&desc, cb.Callback(), userdata);
+
+    // Expect the server to receive the message. Then, mock a fake reply.
+    // The reply contains features that the device implementation supports, but the
+    // wire does not.
+    WGPUDevice apiDevice = api.GetNewDevice();
+    EXPECT_CALL(api, OnAdapterRequestDevice(apiAdapter, NotNull(), NotNull(), NotNull()))
+        .WillOnce(InvokeWithoutArgs([&]() {
+            EXPECT_CALL(api, DeviceEnumerateFeatures(apiDevice, nullptr))
+                .WillOnce(Return(fakeFeatures.size()));
+
+            EXPECT_CALL(api, DeviceEnumerateFeatures(apiDevice, NotNull()))
+                .WillOnce(WithArg<1>(Invoke([&](WGPUFeatureName* features) {
+                    for (wgpu::FeatureName feature : fakeFeatures) {
+                        *(features++) = static_cast<WGPUFeatureName>(feature);
+                    }
+                    return fakeFeatures.size();
+                })));
+
+            // The device was actually created, but the wire didn't support its features.
+            // Expect it to be released.
+            EXPECT_CALL(api, DeviceRelease(apiDevice));
+
+            // Fake successful creation. The client still receives a failure due to
+            // unsupported features.
+            api.CallAdapterRequestDeviceCallback(apiAdapter, WGPURequestDeviceStatus_Success,
+                                                 apiDevice, nullptr);
+        }));
+    FlushClient();
+
+    // Expect an error callback since the feature is not supported.
+    EXPECT_CALL(cb, Call(WGPURequestDeviceStatus_Error, nullptr, NotNull(), this)).Times(1);
+    FlushServer();
+}
+
+// Test that RequestDevice errors forward to the client.
+TEST_F(WireAdapterTests, RequestDeviceError) {
+    MockCallback<WGPURequestDeviceCallback> cb;
+    auto* userdata = cb.MakeUserdata(this);
+
+    wgpu::DeviceDescriptor desc = {};
+    adapter.RequestDevice(&desc, cb.Callback(), userdata);
+
+    // Expect the server to receive the message. Then, mock an error.
+    EXPECT_CALL(api, OnAdapterRequestDevice(apiAdapter, NotNull(), NotNull(), NotNull()))
+        .WillOnce(InvokeWithoutArgs([&]() {
+            api.CallAdapterRequestDeviceCallback(apiAdapter, WGPURequestDeviceStatus_Error, nullptr,
+                                                 "Request device failed");
+        }));
+    FlushClient();
+
+    // Expect the callback in the client.
+    EXPECT_CALL(cb,
+                Call(WGPURequestDeviceStatus_Error, nullptr, StrEq("Request device failed"), this))
+        .Times(1);
+    FlushServer();
+}
+
+// Test that RequestDevice receives unknown status if the adapter is deleted
+// before the callback happens.
+TEST_F(WireAdapterTests, RequestDeviceAdapterDestroyedBeforeCallback) {
+    MockCallback<WGPURequestDeviceCallback> cb;
+    auto* userdata = cb.MakeUserdata(this);
+
+    wgpu::DeviceDescriptor desc = {};
+    adapter.RequestDevice(&desc, cb.Callback(), userdata);
+
+    EXPECT_CALL(cb, Call(WGPURequestDeviceStatus_Unknown, nullptr, NotNull(), this)).Times(1);
+    adapter = nullptr;
+}
+
+// Test that RequestDevice receives unknown status if the wire is disconnected
+// before the callback happens.
+TEST_F(WireAdapterTests, RequestDeviceWireDisconnectedBeforeCallback) {
+    MockCallback<WGPURequestDeviceCallback> cb;
+    auto* userdata = cb.MakeUserdata(this);
+
+    wgpu::DeviceDescriptor desc = {};
+    adapter.RequestDevice(&desc, cb.Callback(), userdata);
+
+    EXPECT_CALL(cb, Call(WGPURequestDeviceStatus_Unknown, nullptr, NotNull(), this)).Times(1);
+    GetWireClient()->Disconnect();
+}
+
+// TODO(https://crbug.com/dawn/1381) Remove when namespaces are not indented.
+// NOLINTNEXTLINE(readability/namespace)
+}  // namespace
+}  // namespace dawn::wire
diff --git a/src/dawn/tests/unittests/wire/WireArgumentTests.cpp b/src/dawn/tests/unittests/wire/WireArgumentTests.cpp
index c208085..4f3be8b 100644
--- a/src/dawn/tests/unittests/wire/WireArgumentTests.cpp
+++ b/src/dawn/tests/unittests/wire/WireArgumentTests.cpp
@@ -15,256 +15,244 @@
 #include <array>
 #include <string>
 
-#include "dawn/tests/unittests/wire/WireTest.h"
 #include "dawn/common/Constants.h"
+#include "dawn/tests/unittests/wire/WireTest.h"
 
 namespace dawn::wire {
 
-    using testing::_;
-    using testing::Return;
-    using testing::Sequence;
+using testing::_;
+using testing::Return;
+using testing::Sequence;
 
-    class WireArgumentTests : public WireTest {
-      public:
-        WireArgumentTests() {
-        }
-        ~WireArgumentTests() override = default;
-    };
+class WireArgumentTests : public WireTest {
+  public:
+    WireArgumentTests() {}
+    ~WireArgumentTests() override = default;
+};
 
-    // Test that the wire is able to send numerical values
-    TEST_F(WireArgumentTests, ValueArgument) {
-        WGPUCommandEncoder encoder = wgpuDeviceCreateCommandEncoder(device, nullptr);
-        WGPUComputePassEncoder pass = wgpuCommandEncoderBeginComputePass(encoder, nullptr);
-        wgpuComputePassEncoderDispatch(pass, 1, 2, 3);
+// Test that the wire is able to send numerical values
+TEST_F(WireArgumentTests, ValueArgument) {
+    WGPUCommandEncoder encoder = wgpuDeviceCreateCommandEncoder(device, nullptr);
+    WGPUComputePassEncoder pass = wgpuCommandEncoderBeginComputePass(encoder, nullptr);
+    wgpuComputePassEncoderDispatch(pass, 1, 2, 3);
 
-        WGPUCommandEncoder apiEncoder = api.GetNewCommandEncoder();
-        EXPECT_CALL(api, DeviceCreateCommandEncoder(apiDevice, nullptr))
-            .WillOnce(Return(apiEncoder));
+    WGPUCommandEncoder apiEncoder = api.GetNewCommandEncoder();
+    EXPECT_CALL(api, DeviceCreateCommandEncoder(apiDevice, nullptr)).WillOnce(Return(apiEncoder));
 
-        WGPUComputePassEncoder apiPass = api.GetNewComputePassEncoder();
-        EXPECT_CALL(api, CommandEncoderBeginComputePass(apiEncoder, nullptr))
-            .WillOnce(Return(apiPass));
+    WGPUComputePassEncoder apiPass = api.GetNewComputePassEncoder();
+    EXPECT_CALL(api, CommandEncoderBeginComputePass(apiEncoder, nullptr)).WillOnce(Return(apiPass));
 
-        EXPECT_CALL(api, ComputePassEncoderDispatch(apiPass, 1, 2, 3)).Times(1);
+    EXPECT_CALL(api, ComputePassEncoderDispatch(apiPass, 1, 2, 3)).Times(1);
 
-        FlushClient();
-    }
+    FlushClient();
+}
 
-    // Test that the wire is able to send arrays of numerical values
-    TEST_F(WireArgumentTests, ValueArrayArgument) {
-        // Create a bindgroup.
-        WGPUBindGroupLayoutDescriptor bglDescriptor = {};
-        bglDescriptor.entryCount = 0;
-        bglDescriptor.entries = nullptr;
+// Test that the wire is able to send arrays of numerical values
+TEST_F(WireArgumentTests, ValueArrayArgument) {
+    // Create a bindgroup.
+    WGPUBindGroupLayoutDescriptor bglDescriptor = {};
+    bglDescriptor.entryCount = 0;
+    bglDescriptor.entries = nullptr;
 
-        WGPUBindGroupLayout bgl = wgpuDeviceCreateBindGroupLayout(device, &bglDescriptor);
-        WGPUBindGroupLayout apiBgl = api.GetNewBindGroupLayout();
-        EXPECT_CALL(api, DeviceCreateBindGroupLayout(apiDevice, _)).WillOnce(Return(apiBgl));
+    WGPUBindGroupLayout bgl = wgpuDeviceCreateBindGroupLayout(device, &bglDescriptor);
+    WGPUBindGroupLayout apiBgl = api.GetNewBindGroupLayout();
+    EXPECT_CALL(api, DeviceCreateBindGroupLayout(apiDevice, _)).WillOnce(Return(apiBgl));
 
-        WGPUBindGroupDescriptor bindGroupDescriptor = {};
-        bindGroupDescriptor.layout = bgl;
-        bindGroupDescriptor.entryCount = 0;
-        bindGroupDescriptor.entries = nullptr;
+    WGPUBindGroupDescriptor bindGroupDescriptor = {};
+    bindGroupDescriptor.layout = bgl;
+    bindGroupDescriptor.entryCount = 0;
+    bindGroupDescriptor.entries = nullptr;
 
-        WGPUBindGroup bindGroup = wgpuDeviceCreateBindGroup(device, &bindGroupDescriptor);
-        WGPUBindGroup apiBindGroup = api.GetNewBindGroup();
-        EXPECT_CALL(api, DeviceCreateBindGroup(apiDevice, _)).WillOnce(Return(apiBindGroup));
+    WGPUBindGroup bindGroup = wgpuDeviceCreateBindGroup(device, &bindGroupDescriptor);
+    WGPUBindGroup apiBindGroup = api.GetNewBindGroup();
+    EXPECT_CALL(api, DeviceCreateBindGroup(apiDevice, _)).WillOnce(Return(apiBindGroup));
 
-        // Use the bindgroup in SetBindGroup that takes an array of value offsets.
-        WGPUCommandEncoder encoder = wgpuDeviceCreateCommandEncoder(device, nullptr);
-        WGPUComputePassEncoder pass = wgpuCommandEncoderBeginComputePass(encoder, nullptr);
+    // Use the bindgroup in SetBindGroup that takes an array of value offsets.
+    WGPUCommandEncoder encoder = wgpuDeviceCreateCommandEncoder(device, nullptr);
+    WGPUComputePassEncoder pass = wgpuCommandEncoderBeginComputePass(encoder, nullptr);
 
-        std::array<uint32_t, 4> testOffsets = {0, 42, 0xDEAD'BEEFu, 0xFFFF'FFFFu};
-        wgpuComputePassEncoderSetBindGroup(pass, 0, bindGroup, testOffsets.size(),
-                                           testOffsets.data());
+    std::array<uint32_t, 4> testOffsets = {0, 42, 0xDEAD'BEEFu, 0xFFFF'FFFFu};
+    wgpuComputePassEncoderSetBindGroup(pass, 0, bindGroup, testOffsets.size(), testOffsets.data());
 
-        WGPUCommandEncoder apiEncoder = api.GetNewCommandEncoder();
-        EXPECT_CALL(api, DeviceCreateCommandEncoder(apiDevice, nullptr))
-            .WillOnce(Return(apiEncoder));
+    WGPUCommandEncoder apiEncoder = api.GetNewCommandEncoder();
+    EXPECT_CALL(api, DeviceCreateCommandEncoder(apiDevice, nullptr)).WillOnce(Return(apiEncoder));
 
-        WGPUComputePassEncoder apiPass = api.GetNewComputePassEncoder();
-        EXPECT_CALL(api, CommandEncoderBeginComputePass(apiEncoder, nullptr))
-            .WillOnce(Return(apiPass));
+    WGPUComputePassEncoder apiPass = api.GetNewComputePassEncoder();
+    EXPECT_CALL(api, CommandEncoderBeginComputePass(apiEncoder, nullptr)).WillOnce(Return(apiPass));
 
-        EXPECT_CALL(api, ComputePassEncoderSetBindGroup(
-                             apiPass, 0, apiBindGroup, testOffsets.size(),
-                             MatchesLambda([testOffsets](const uint32_t* offsets) -> bool {
-                                 for (size_t i = 0; i < testOffsets.size(); i++) {
-                                     if (offsets[i] != testOffsets[i]) {
-                                         return false;
-                                     }
+    EXPECT_CALL(api, ComputePassEncoderSetBindGroup(
+                         apiPass, 0, apiBindGroup, testOffsets.size(),
+                         MatchesLambda([testOffsets](const uint32_t* offsets) -> bool {
+                             for (size_t i = 0; i < testOffsets.size(); i++) {
+                                 if (offsets[i] != testOffsets[i]) {
+                                     return false;
                                  }
-                                 return true;
-                             })));
+                             }
+                             return true;
+                         })));
 
-        FlushClient();
-    }
+    FlushClient();
+}
 
-    // Test that the wire is able to send C strings
-    TEST_F(WireArgumentTests, CStringArgument) {
-        // Create shader module
-        WGPUShaderModuleDescriptor vertexDescriptor = {};
-        WGPUShaderModule vsModule = wgpuDeviceCreateShaderModule(device, &vertexDescriptor);
-        WGPUShaderModule apiVsModule = api.GetNewShaderModule();
-        EXPECT_CALL(api, DeviceCreateShaderModule(apiDevice, _)).WillOnce(Return(apiVsModule));
+// Test that the wire is able to send C strings
+TEST_F(WireArgumentTests, CStringArgument) {
+    // Create shader module
+    WGPUShaderModuleDescriptor vertexDescriptor = {};
+    WGPUShaderModule vsModule = wgpuDeviceCreateShaderModule(device, &vertexDescriptor);
+    WGPUShaderModule apiVsModule = api.GetNewShaderModule();
+    EXPECT_CALL(api, DeviceCreateShaderModule(apiDevice, _)).WillOnce(Return(apiVsModule));
 
-        // Create the color state descriptor
-        WGPUBlendComponent blendComponent = {};
-        blendComponent.operation = WGPUBlendOperation_Add;
-        blendComponent.srcFactor = WGPUBlendFactor_One;
-        blendComponent.dstFactor = WGPUBlendFactor_One;
-        WGPUBlendState blendState = {};
-        blendState.alpha = blendComponent;
-        blendState.color = blendComponent;
-        WGPUColorTargetState colorTargetState = {};
-        colorTargetState.format = WGPUTextureFormat_RGBA8Unorm;
-        colorTargetState.blend = &blendState;
-        colorTargetState.writeMask = WGPUColorWriteMask_All;
+    // Create the color state descriptor
+    WGPUBlendComponent blendComponent = {};
+    blendComponent.operation = WGPUBlendOperation_Add;
+    blendComponent.srcFactor = WGPUBlendFactor_One;
+    blendComponent.dstFactor = WGPUBlendFactor_One;
+    WGPUBlendState blendState = {};
+    blendState.alpha = blendComponent;
+    blendState.color = blendComponent;
+    WGPUColorTargetState colorTargetState = {};
+    colorTargetState.format = WGPUTextureFormat_RGBA8Unorm;
+    colorTargetState.blend = &blendState;
+    colorTargetState.writeMask = WGPUColorWriteMask_All;
 
-        // Create the depth-stencil state
-        WGPUStencilFaceState stencilFace = {};
-        stencilFace.compare = WGPUCompareFunction_Always;
-        stencilFace.failOp = WGPUStencilOperation_Keep;
-        stencilFace.depthFailOp = WGPUStencilOperation_Keep;
-        stencilFace.passOp = WGPUStencilOperation_Keep;
+    // Create the depth-stencil state
+    WGPUStencilFaceState stencilFace = {};
+    stencilFace.compare = WGPUCompareFunction_Always;
+    stencilFace.failOp = WGPUStencilOperation_Keep;
+    stencilFace.depthFailOp = WGPUStencilOperation_Keep;
+    stencilFace.passOp = WGPUStencilOperation_Keep;
 
-        WGPUDepthStencilState depthStencilState = {};
-        depthStencilState.format = WGPUTextureFormat_Depth24PlusStencil8;
-        depthStencilState.depthWriteEnabled = false;
-        depthStencilState.depthCompare = WGPUCompareFunction_Always;
-        depthStencilState.stencilBack = stencilFace;
-        depthStencilState.stencilFront = stencilFace;
-        depthStencilState.stencilReadMask = 0xff;
-        depthStencilState.stencilWriteMask = 0xff;
-        depthStencilState.depthBias = 0;
-        depthStencilState.depthBiasSlopeScale = 0.0;
-        depthStencilState.depthBiasClamp = 0.0;
+    WGPUDepthStencilState depthStencilState = {};
+    depthStencilState.format = WGPUTextureFormat_Depth24PlusStencil8;
+    depthStencilState.depthWriteEnabled = false;
+    depthStencilState.depthCompare = WGPUCompareFunction_Always;
+    depthStencilState.stencilBack = stencilFace;
+    depthStencilState.stencilFront = stencilFace;
+    depthStencilState.stencilReadMask = 0xff;
+    depthStencilState.stencilWriteMask = 0xff;
+    depthStencilState.depthBias = 0;
+    depthStencilState.depthBiasSlopeScale = 0.0;
+    depthStencilState.depthBiasClamp = 0.0;
 
-        // Create the pipeline layout
-        WGPUPipelineLayoutDescriptor layoutDescriptor = {};
-        layoutDescriptor.bindGroupLayoutCount = 0;
-        layoutDescriptor.bindGroupLayouts = nullptr;
-        WGPUPipelineLayout layout = wgpuDeviceCreatePipelineLayout(device, &layoutDescriptor);
-        WGPUPipelineLayout apiLayout = api.GetNewPipelineLayout();
-        EXPECT_CALL(api, DeviceCreatePipelineLayout(apiDevice, _)).WillOnce(Return(apiLayout));
+    // Create the pipeline layout
+    WGPUPipelineLayoutDescriptor layoutDescriptor = {};
+    layoutDescriptor.bindGroupLayoutCount = 0;
+    layoutDescriptor.bindGroupLayouts = nullptr;
+    WGPUPipelineLayout layout = wgpuDeviceCreatePipelineLayout(device, &layoutDescriptor);
+    WGPUPipelineLayout apiLayout = api.GetNewPipelineLayout();
+    EXPECT_CALL(api, DeviceCreatePipelineLayout(apiDevice, _)).WillOnce(Return(apiLayout));
 
-        // Create pipeline
-        WGPURenderPipelineDescriptor pipelineDescriptor = {};
+    // Create pipeline
+    WGPURenderPipelineDescriptor pipelineDescriptor = {};
 
-        pipelineDescriptor.vertex.module = vsModule;
-        pipelineDescriptor.vertex.entryPoint = "main";
-        pipelineDescriptor.vertex.bufferCount = 0;
-        pipelineDescriptor.vertex.buffers = nullptr;
+    pipelineDescriptor.vertex.module = vsModule;
+    pipelineDescriptor.vertex.entryPoint = "main";
+    pipelineDescriptor.vertex.bufferCount = 0;
+    pipelineDescriptor.vertex.buffers = nullptr;
 
-        WGPUFragmentState fragment = {};
-        fragment.module = vsModule;
-        fragment.entryPoint = "main";
-        fragment.targetCount = 1;
-        fragment.targets = &colorTargetState;
-        pipelineDescriptor.fragment = &fragment;
+    WGPUFragmentState fragment = {};
+    fragment.module = vsModule;
+    fragment.entryPoint = "main";
+    fragment.targetCount = 1;
+    fragment.targets = &colorTargetState;
+    pipelineDescriptor.fragment = &fragment;
 
-        pipelineDescriptor.multisample.count = 1;
-        pipelineDescriptor.multisample.mask = 0xFFFFFFFF;
-        pipelineDescriptor.multisample.alphaToCoverageEnabled = false;
-        pipelineDescriptor.layout = layout;
-        pipelineDescriptor.primitive.topology = WGPUPrimitiveTopology_TriangleList;
-        pipelineDescriptor.primitive.frontFace = WGPUFrontFace_CCW;
-        pipelineDescriptor.primitive.cullMode = WGPUCullMode_None;
-        pipelineDescriptor.depthStencil = &depthStencilState;
+    pipelineDescriptor.multisample.count = 1;
+    pipelineDescriptor.multisample.mask = 0xFFFFFFFF;
+    pipelineDescriptor.multisample.alphaToCoverageEnabled = false;
+    pipelineDescriptor.layout = layout;
+    pipelineDescriptor.primitive.topology = WGPUPrimitiveTopology_TriangleList;
+    pipelineDescriptor.primitive.frontFace = WGPUFrontFace_CCW;
+    pipelineDescriptor.primitive.cullMode = WGPUCullMode_None;
+    pipelineDescriptor.depthStencil = &depthStencilState;
 
-        wgpuDeviceCreateRenderPipeline(device, &pipelineDescriptor);
+    wgpuDeviceCreateRenderPipeline(device, &pipelineDescriptor);
 
-        WGPURenderPipeline apiPlaceholderPipeline = api.GetNewRenderPipeline();
-        EXPECT_CALL(
-            api, DeviceCreateRenderPipeline(
-                     apiDevice, MatchesLambda([](const WGPURenderPipelineDescriptor* desc) -> bool {
-                         return desc->vertex.entryPoint == std::string("main");
-                     })))
-            .WillOnce(Return(apiPlaceholderPipeline));
+    WGPURenderPipeline apiPlaceholderPipeline = api.GetNewRenderPipeline();
+    EXPECT_CALL(api,
+                DeviceCreateRenderPipeline(
+                    apiDevice, MatchesLambda([](const WGPURenderPipelineDescriptor* desc) -> bool {
+                        return desc->vertex.entryPoint == std::string("main");
+                    })))
+        .WillOnce(Return(apiPlaceholderPipeline));
 
-        FlushClient();
-    }
+    FlushClient();
+}
 
-    // Test that the wire is able to send objects as value arguments
-    TEST_F(WireArgumentTests, ObjectAsValueArgument) {
+// Test that the wire is able to send objects as value arguments
+TEST_F(WireArgumentTests, ObjectAsValueArgument) {
+    WGPUCommandEncoder cmdBufEncoder = wgpuDeviceCreateCommandEncoder(device, nullptr);
+    WGPUCommandEncoder apiEncoder = api.GetNewCommandEncoder();
+    EXPECT_CALL(api, DeviceCreateCommandEncoder(apiDevice, nullptr)).WillOnce(Return(apiEncoder));
+
+    WGPUBufferDescriptor descriptor = {};
+    descriptor.size = 8;
+    descriptor.usage =
+        static_cast<WGPUBufferUsage>(WGPUBufferUsage_CopySrc | WGPUBufferUsage_CopyDst);
+
+    WGPUBuffer buffer = wgpuDeviceCreateBuffer(device, &descriptor);
+    WGPUBuffer apiBuffer = api.GetNewBuffer();
+    EXPECT_CALL(api, DeviceCreateBuffer(apiDevice, _))
+        .WillOnce(Return(apiBuffer))
+        .RetiresOnSaturation();
+
+    wgpuCommandEncoderCopyBufferToBuffer(cmdBufEncoder, buffer, 0, buffer, 4, 4);
+    EXPECT_CALL(api, CommandEncoderCopyBufferToBuffer(apiEncoder, apiBuffer, 0, apiBuffer, 4, 4));
+
+    FlushClient();
+}
+
+// Test that the wire is able to send array of objects
+TEST_F(WireArgumentTests, ObjectsAsPointerArgument) {
+    WGPUCommandBuffer cmdBufs[2];
+    WGPUCommandBuffer apiCmdBufs[2];
+
+    // Create two command buffers. We need to use a GMock sequence, otherwise the order of the
+    // CreateCommandEncoder calls might be swapped since they are equivalent in terms of matchers.
+    Sequence s;
+    for (int i = 0; i < 2; ++i) {
         WGPUCommandEncoder cmdBufEncoder = wgpuDeviceCreateCommandEncoder(device, nullptr);
-        WGPUCommandEncoder apiEncoder = api.GetNewCommandEncoder();
+        cmdBufs[i] = wgpuCommandEncoderFinish(cmdBufEncoder, nullptr);
+
+        WGPUCommandEncoder apiCmdBufEncoder = api.GetNewCommandEncoder();
         EXPECT_CALL(api, DeviceCreateCommandEncoder(apiDevice, nullptr))
-            .WillOnce(Return(apiEncoder));
+            .InSequence(s)
+            .WillOnce(Return(apiCmdBufEncoder));
 
-        WGPUBufferDescriptor descriptor = {};
-        descriptor.size = 8;
-        descriptor.usage =
-            static_cast<WGPUBufferUsage>(WGPUBufferUsage_CopySrc | WGPUBufferUsage_CopyDst);
-
-        WGPUBuffer buffer = wgpuDeviceCreateBuffer(device, &descriptor);
-        WGPUBuffer apiBuffer = api.GetNewBuffer();
-        EXPECT_CALL(api, DeviceCreateBuffer(apiDevice, _))
-            .WillOnce(Return(apiBuffer))
-            .RetiresOnSaturation();
-
-        wgpuCommandEncoderCopyBufferToBuffer(cmdBufEncoder, buffer, 0, buffer, 4, 4);
-        EXPECT_CALL(api,
-                    CommandEncoderCopyBufferToBuffer(apiEncoder, apiBuffer, 0, apiBuffer, 4, 4));
-
-        FlushClient();
+        apiCmdBufs[i] = api.GetNewCommandBuffer();
+        EXPECT_CALL(api, CommandEncoderFinish(apiCmdBufEncoder, nullptr))
+            .WillOnce(Return(apiCmdBufs[i]));
     }
 
-    // Test that the wire is able to send array of objects
-    TEST_F(WireArgumentTests, ObjectsAsPointerArgument) {
-        WGPUCommandBuffer cmdBufs[2];
-        WGPUCommandBuffer apiCmdBufs[2];
+    // Submit the command buffers and check we got a call with both API-side command buffers
+    wgpuQueueSubmit(queue, 2, cmdBufs);
 
-        // Create two command buffers we need to use a GMock sequence otherwise the order of the
-        // CreateCommandEncoder might be swapped since they are equivalent in term of matchers
-        Sequence s;
-        for (int i = 0; i < 2; ++i) {
-            WGPUCommandEncoder cmdBufEncoder = wgpuDeviceCreateCommandEncoder(device, nullptr);
-            cmdBufs[i] = wgpuCommandEncoderFinish(cmdBufEncoder, nullptr);
+    EXPECT_CALL(
+        api, QueueSubmit(apiQueue, 2, MatchesLambda([=](const WGPUCommandBuffer* cmdBufs) -> bool {
+                             return cmdBufs[0] == apiCmdBufs[0] && cmdBufs[1] == apiCmdBufs[1];
+                         })));
 
-            WGPUCommandEncoder apiCmdBufEncoder = api.GetNewCommandEncoder();
-            EXPECT_CALL(api, DeviceCreateCommandEncoder(apiDevice, nullptr))
-                .InSequence(s)
-                .WillOnce(Return(apiCmdBufEncoder));
+    FlushClient();
+}
 
-            apiCmdBufs[i] = api.GetNewCommandBuffer();
-            EXPECT_CALL(api, CommandEncoderFinish(apiCmdBufEncoder, nullptr))
-                .WillOnce(Return(apiCmdBufs[i]));
-        }
+// Test that the wire is able to send structures that contain pure values (non-objects)
+TEST_F(WireArgumentTests, StructureOfValuesArgument) {
+    WGPUSamplerDescriptor descriptor = {};
+    descriptor.magFilter = WGPUFilterMode_Linear;
+    descriptor.minFilter = WGPUFilterMode_Nearest;
+    descriptor.mipmapFilter = WGPUFilterMode_Linear;
+    descriptor.addressModeU = WGPUAddressMode_ClampToEdge;
+    descriptor.addressModeV = WGPUAddressMode_Repeat;
+    descriptor.addressModeW = WGPUAddressMode_MirrorRepeat;
+    descriptor.lodMinClamp = kLodMin;
+    descriptor.lodMaxClamp = kLodMax;
+    descriptor.compare = WGPUCompareFunction_Never;
 
-        // Submit command buffer and check we got a call with both API-side command buffers
-        wgpuQueueSubmit(queue, 2, cmdBufs);
+    wgpuDeviceCreateSampler(device, &descriptor);
 
-        EXPECT_CALL(
-            api,
-            QueueSubmit(apiQueue, 2, MatchesLambda([=](const WGPUCommandBuffer* cmdBufs) -> bool {
-                            return cmdBufs[0] == apiCmdBufs[0] && cmdBufs[1] == apiCmdBufs[1];
-                        })));
-
-        FlushClient();
-    }
-
-    // Test that the wire is able to send structures that contain pure values (non-objects)
-    TEST_F(WireArgumentTests, StructureOfValuesArgument) {
-        WGPUSamplerDescriptor descriptor = {};
-        descriptor.magFilter = WGPUFilterMode_Linear;
-        descriptor.minFilter = WGPUFilterMode_Nearest;
-        descriptor.mipmapFilter = WGPUFilterMode_Linear;
-        descriptor.addressModeU = WGPUAddressMode_ClampToEdge;
-        descriptor.addressModeV = WGPUAddressMode_Repeat;
-        descriptor.addressModeW = WGPUAddressMode_MirrorRepeat;
-        descriptor.lodMinClamp = kLodMin;
-        descriptor.lodMaxClamp = kLodMax;
-        descriptor.compare = WGPUCompareFunction_Never;
-
-        wgpuDeviceCreateSampler(device, &descriptor);
-
-        WGPUSampler apiPlaceholderSampler = api.GetNewSampler();
-        EXPECT_CALL(
-            api, DeviceCreateSampler(
-                     apiDevice,
-                     MatchesLambda(
-                         [](const WGPUSamplerDescriptor* desc) -> bool {
+    WGPUSampler apiPlaceholderSampler = api.GetNewSampler();
+    EXPECT_CALL(api, DeviceCreateSampler(
+                         apiDevice, MatchesLambda([](const WGPUSamplerDescriptor* desc) -> bool {
                              return desc->nextInChain == nullptr &&
                                     desc->magFilter == WGPUFilterMode_Linear &&
                                     desc->minFilter == WGPUFilterMode_Nearest &&
@@ -275,111 +263,110 @@
                                     desc->compare == WGPUCompareFunction_Never &&
                                     desc->lodMinClamp == kLodMin && desc->lodMaxClamp == kLodMax;
                          })))
-            .WillOnce(Return(apiPlaceholderSampler));
+        .WillOnce(Return(apiPlaceholderSampler));
 
-        FlushClient();
-    }
+    FlushClient();
+}
 
-    // Test that the wire is able to send structures that contain objects
-    TEST_F(WireArgumentTests, StructureOfObjectArrayArgument) {
-        WGPUBindGroupLayoutDescriptor bglDescriptor = {};
-        bglDescriptor.entryCount = 0;
-        bglDescriptor.entries = nullptr;
+// Test that the wire is able to send structures that contain objects
+TEST_F(WireArgumentTests, StructureOfObjectArrayArgument) {
+    WGPUBindGroupLayoutDescriptor bglDescriptor = {};
+    bglDescriptor.entryCount = 0;
+    bglDescriptor.entries = nullptr;
 
-        WGPUBindGroupLayout bgl = wgpuDeviceCreateBindGroupLayout(device, &bglDescriptor);
-        WGPUBindGroupLayout apiBgl = api.GetNewBindGroupLayout();
-        EXPECT_CALL(api, DeviceCreateBindGroupLayout(apiDevice, _)).WillOnce(Return(apiBgl));
+    WGPUBindGroupLayout bgl = wgpuDeviceCreateBindGroupLayout(device, &bglDescriptor);
+    WGPUBindGroupLayout apiBgl = api.GetNewBindGroupLayout();
+    EXPECT_CALL(api, DeviceCreateBindGroupLayout(apiDevice, _)).WillOnce(Return(apiBgl));
 
-        WGPUPipelineLayoutDescriptor descriptor = {};
-        descriptor.bindGroupLayoutCount = 1;
-        descriptor.bindGroupLayouts = &bgl;
+    WGPUPipelineLayoutDescriptor descriptor = {};
+    descriptor.bindGroupLayoutCount = 1;
+    descriptor.bindGroupLayouts = &bgl;
 
-        wgpuDeviceCreatePipelineLayout(device, &descriptor);
+    wgpuDeviceCreatePipelineLayout(device, &descriptor);
 
-        WGPUPipelineLayout apiPlaceholderLayout = api.GetNewPipelineLayout();
-        EXPECT_CALL(
-            api, DeviceCreatePipelineLayout(
-                     apiDevice,
-                     MatchesLambda([apiBgl](const WGPUPipelineLayoutDescriptor* desc) -> bool {
-                         return desc->nextInChain == nullptr && desc->bindGroupLayoutCount == 1 &&
-                                desc->bindGroupLayouts[0] == apiBgl;
-                     })))
-            .WillOnce(Return(apiPlaceholderLayout));
+    WGPUPipelineLayout apiPlaceholderLayout = api.GetNewPipelineLayout();
+    EXPECT_CALL(api, DeviceCreatePipelineLayout(
+                         apiDevice,
+                         MatchesLambda([apiBgl](const WGPUPipelineLayoutDescriptor* desc) -> bool {
+                             return desc->nextInChain == nullptr &&
+                                    desc->bindGroupLayoutCount == 1 &&
+                                    desc->bindGroupLayouts[0] == apiBgl;
+                         })))
+        .WillOnce(Return(apiPlaceholderLayout));
 
-        FlushClient();
-    }
+    FlushClient();
+}
 
-    // Test that the wire is able to send structures that contain objects
-    TEST_F(WireArgumentTests, StructureOfStructureArrayArgument) {
-        static constexpr int NUM_BINDINGS = 3;
-        WGPUBindGroupLayoutEntry entries[NUM_BINDINGS]{
-            {nullptr,
-             0,
-             WGPUShaderStage_Vertex,
-             {},
-             {nullptr, WGPUSamplerBindingType_Filtering},
-             {},
-             {}},
-            {nullptr,
-             1,
-             WGPUShaderStage_Vertex,
-             {},
-             {},
-             {nullptr, WGPUTextureSampleType_Float, WGPUTextureViewDimension_2D, false},
-             {}},
-            {nullptr,
-             2,
-             static_cast<WGPUShaderStage>(WGPUShaderStage_Vertex | WGPUShaderStage_Fragment),
-             {nullptr, WGPUBufferBindingType_Uniform, false, 0},
-             {},
-             {},
-             {}},
-        };
-        WGPUBindGroupLayoutDescriptor bglDescriptor = {};
-        bglDescriptor.entryCount = NUM_BINDINGS;
-        bglDescriptor.entries = entries;
+// Test that the wire is able to send structures that contain objects
+TEST_F(WireArgumentTests, StructureOfStructureArrayArgument) {
+    static constexpr int NUM_BINDINGS = 3;
+    WGPUBindGroupLayoutEntry entries[NUM_BINDINGS]{
+        {nullptr,
+         0,
+         WGPUShaderStage_Vertex,
+         {},
+         {nullptr, WGPUSamplerBindingType_Filtering},
+         {},
+         {}},
+        {nullptr,
+         1,
+         WGPUShaderStage_Vertex,
+         {},
+         {},
+         {nullptr, WGPUTextureSampleType_Float, WGPUTextureViewDimension_2D, false},
+         {}},
+        {nullptr,
+         2,
+         static_cast<WGPUShaderStage>(WGPUShaderStage_Vertex | WGPUShaderStage_Fragment),
+         {nullptr, WGPUBufferBindingType_Uniform, false, 0},
+         {},
+         {},
+         {}},
+    };
+    WGPUBindGroupLayoutDescriptor bglDescriptor = {};
+    bglDescriptor.entryCount = NUM_BINDINGS;
+    bglDescriptor.entries = entries;
 
-        wgpuDeviceCreateBindGroupLayout(device, &bglDescriptor);
-        WGPUBindGroupLayout apiBgl = api.GetNewBindGroupLayout();
-        EXPECT_CALL(api,
-                    DeviceCreateBindGroupLayout(
-                        apiDevice,
-                        MatchesLambda([entries](const WGPUBindGroupLayoutDescriptor* desc) -> bool {
-                            for (int i = 0; i < NUM_BINDINGS; ++i) {
-                                const auto& a = desc->entries[i];
-                                const auto& b = entries[i];
-                                if (a.binding != b.binding || a.visibility != b.visibility ||
-                                    a.buffer.type != b.buffer.type ||
-                                    a.sampler.type != b.sampler.type ||
-                                    a.texture.sampleType != b.texture.sampleType) {
-                                    return false;
-                                }
-                            }
-                            return desc->nextInChain == nullptr && desc->entryCount == 3;
-                        })))
-            .WillOnce(Return(apiBgl));
+    wgpuDeviceCreateBindGroupLayout(device, &bglDescriptor);
+    WGPUBindGroupLayout apiBgl = api.GetNewBindGroupLayout();
+    EXPECT_CALL(
+        api,
+        DeviceCreateBindGroupLayout(
+            apiDevice, MatchesLambda([entries](const WGPUBindGroupLayoutDescriptor* desc) -> bool {
+                for (int i = 0; i < NUM_BINDINGS; ++i) {
+                    const auto& a = desc->entries[i];
+                    const auto& b = entries[i];
+                    if (a.binding != b.binding || a.visibility != b.visibility ||
+                        a.buffer.type != b.buffer.type || a.sampler.type != b.sampler.type ||
+                        a.texture.sampleType != b.texture.sampleType) {
+                        return false;
+                    }
+                }
+                return desc->nextInChain == nullptr && desc->entryCount == 3;
+            })))
+        .WillOnce(Return(apiBgl));
 
-        FlushClient();
-    }
+    FlushClient();
+}
 
-    // Test passing nullptr instead of objects - array of objects version
-    TEST_F(WireArgumentTests, DISABLED_NullptrInArray) {
-        WGPUBindGroupLayout nullBGL = nullptr;
+// Test passing nullptr instead of objects - array of objects version
+TEST_F(WireArgumentTests, DISABLED_NullptrInArray) {
+    WGPUBindGroupLayout nullBGL = nullptr;
 
-        WGPUPipelineLayoutDescriptor descriptor = {};
-        descriptor.bindGroupLayoutCount = 1;
-        descriptor.bindGroupLayouts = &nullBGL;
+    WGPUPipelineLayoutDescriptor descriptor = {};
+    descriptor.bindGroupLayoutCount = 1;
+    descriptor.bindGroupLayouts = &nullBGL;
 
-        wgpuDeviceCreatePipelineLayout(device, &descriptor);
-        EXPECT_CALL(
-            api, DeviceCreatePipelineLayout(
-                     apiDevice, MatchesLambda([](const WGPUPipelineLayoutDescriptor* desc) -> bool {
-                         return desc->nextInChain == nullptr && desc->bindGroupLayoutCount == 1 &&
-                                desc->bindGroupLayouts[0] == nullptr;
-                     })))
-            .WillOnce(Return(nullptr));
+    wgpuDeviceCreatePipelineLayout(device, &descriptor);
+    EXPECT_CALL(api,
+                DeviceCreatePipelineLayout(
+                    apiDevice, MatchesLambda([](const WGPUPipelineLayoutDescriptor* desc) -> bool {
+                        return desc->nextInChain == nullptr && desc->bindGroupLayoutCount == 1 &&
+                               desc->bindGroupLayouts[0] == nullptr;
+                    })))
+        .WillOnce(Return(nullptr));
 
-        FlushClient();
-    }
+    FlushClient();
+}
 
 }  // namespace dawn::wire
diff --git a/src/dawn/tests/unittests/wire/WireBasicTests.cpp b/src/dawn/tests/unittests/wire/WireBasicTests.cpp
index 9a05acf..34a9c7f 100644
--- a/src/dawn/tests/unittests/wire/WireBasicTests.cpp
+++ b/src/dawn/tests/unittests/wire/WireBasicTests.cpp
@@ -16,69 +16,67 @@
 
 namespace dawn::wire {
 
-    using testing::Return;
+using testing::Return;
 
-    class WireBasicTests : public WireTest {
-      public:
-        WireBasicTests() {
-        }
-        ~WireBasicTests() override = default;
-    };
+class WireBasicTests : public WireTest {
+  public:
+    WireBasicTests() {}
+    ~WireBasicTests() override = default;
+};
 
-    // One call gets forwarded correctly.
-    TEST_F(WireBasicTests, CallForwarded) {
-        wgpuDeviceCreateCommandEncoder(device, nullptr);
+// One call gets forwarded correctly.
+TEST_F(WireBasicTests, CallForwarded) {
+    wgpuDeviceCreateCommandEncoder(device, nullptr);
 
-        WGPUCommandEncoder apiCmdBufEncoder = api.GetNewCommandEncoder();
-        EXPECT_CALL(api, DeviceCreateCommandEncoder(apiDevice, nullptr))
-            .WillOnce(Return(apiCmdBufEncoder));
+    WGPUCommandEncoder apiCmdBufEncoder = api.GetNewCommandEncoder();
+    EXPECT_CALL(api, DeviceCreateCommandEncoder(apiDevice, nullptr))
+        .WillOnce(Return(apiCmdBufEncoder));
 
-        FlushClient();
-    }
+    FlushClient();
+}
 
-    // Test that calling methods on a new object works as expected.
-    TEST_F(WireBasicTests, CreateThenCall) {
-        WGPUCommandEncoder encoder = wgpuDeviceCreateCommandEncoder(device, nullptr);
-        wgpuCommandEncoderFinish(encoder, nullptr);
+// Test that calling methods on a new object works as expected.
+TEST_F(WireBasicTests, CreateThenCall) {
+    WGPUCommandEncoder encoder = wgpuDeviceCreateCommandEncoder(device, nullptr);
+    wgpuCommandEncoderFinish(encoder, nullptr);
 
-        WGPUCommandEncoder apiCmdBufEncoder = api.GetNewCommandEncoder();
-        EXPECT_CALL(api, DeviceCreateCommandEncoder(apiDevice, nullptr))
-            .WillOnce(Return(apiCmdBufEncoder));
+    WGPUCommandEncoder apiCmdBufEncoder = api.GetNewCommandEncoder();
+    EXPECT_CALL(api, DeviceCreateCommandEncoder(apiDevice, nullptr))
+        .WillOnce(Return(apiCmdBufEncoder));
 
-        WGPUCommandBuffer apiCmdBuf = api.GetNewCommandBuffer();
-        EXPECT_CALL(api, CommandEncoderFinish(apiCmdBufEncoder, nullptr))
-            .WillOnce(Return(apiCmdBuf));
+    WGPUCommandBuffer apiCmdBuf = api.GetNewCommandBuffer();
+    EXPECT_CALL(api, CommandEncoderFinish(apiCmdBufEncoder, nullptr)).WillOnce(Return(apiCmdBuf));
 
-        FlushClient();
-    }
+    FlushClient();
+}
 
-    // Test that client reference/release do not call the backend API.
-    TEST_F(WireBasicTests, RefCountKeptInClient) {
-        WGPUCommandEncoder encoder = wgpuDeviceCreateCommandEncoder(device, nullptr);
+// Test that client reference/release do not call the backend API.
+TEST_F(WireBasicTests, RefCountKeptInClient) {
+    WGPUCommandEncoder encoder = wgpuDeviceCreateCommandEncoder(device, nullptr);
 
-        wgpuCommandEncoderReference(encoder);
-        wgpuCommandEncoderRelease(encoder);
+    wgpuCommandEncoderReference(encoder);
+    wgpuCommandEncoderRelease(encoder);
 
-        WGPUCommandEncoder apiCmdBufEncoder = api.GetNewCommandEncoder();
-        EXPECT_CALL(api, DeviceCreateCommandEncoder(apiDevice, nullptr))
-            .WillOnce(Return(apiCmdBufEncoder));
+    WGPUCommandEncoder apiCmdBufEncoder = api.GetNewCommandEncoder();
+    EXPECT_CALL(api, DeviceCreateCommandEncoder(apiDevice, nullptr))
+        .WillOnce(Return(apiCmdBufEncoder));
 
-        FlushClient();
-    }
+    FlushClient();
+}
 
-    // Test that client reference/release do not call the backend API.
-    TEST_F(WireBasicTests, ReleaseCalledOnRefCount0) {
-        WGPUCommandEncoder encoder = wgpuDeviceCreateCommandEncoder(device, nullptr);
+// Test that client reference/release do not call the backend API.
+TEST_F(WireBasicTests, ReleaseCalledOnRefCount0) {
+    WGPUCommandEncoder encoder = wgpuDeviceCreateCommandEncoder(device, nullptr);
 
-        wgpuCommandEncoderRelease(encoder);
+    wgpuCommandEncoderRelease(encoder);
 
-        WGPUCommandEncoder apiCmdBufEncoder = api.GetNewCommandEncoder();
-        EXPECT_CALL(api, DeviceCreateCommandEncoder(apiDevice, nullptr))
-            .WillOnce(Return(apiCmdBufEncoder));
+    WGPUCommandEncoder apiCmdBufEncoder = api.GetNewCommandEncoder();
+    EXPECT_CALL(api, DeviceCreateCommandEncoder(apiDevice, nullptr))
+        .WillOnce(Return(apiCmdBufEncoder));
 
-        EXPECT_CALL(api, CommandEncoderRelease(apiCmdBufEncoder));
+    EXPECT_CALL(api, CommandEncoderRelease(apiCmdBufEncoder));
 
-        FlushClient();
-    }
+    FlushClient();
+}
 
 }  // namespace dawn::wire
diff --git a/src/dawn/tests/unittests/wire/WireBufferMappingTests.cpp b/src/dawn/tests/unittests/wire/WireBufferMappingTests.cpp
index c4fb948..e49ee7d 100644
--- a/src/dawn/tests/unittests/wire/WireBufferMappingTests.cpp
+++ b/src/dawn/tests/unittests/wire/WireBufferMappingTests.cpp
@@ -20,840 +20,801 @@
 
 namespace dawn::wire {
 
-    using testing::_;
-    using testing::InvokeWithoutArgs;
-    using testing::Mock;
-    using testing::Return;
-    using testing::StrictMock;
+using testing::_;
+using testing::InvokeWithoutArgs;
+using testing::Mock;
+using testing::Return;
+using testing::StrictMock;
 
-    namespace {
+namespace {
 
-        // Mock class to add expectations on the wire calling callbacks
-        class MockBufferMapCallback {
-          public:
-            MOCK_METHOD(void, Call, (WGPUBufferMapAsyncStatus status, void* userdata));
-        };
+// Mock class to add expectations on the wire calling callbacks
+class MockBufferMapCallback {
+  public:
+    MOCK_METHOD(void, Call, (WGPUBufferMapAsyncStatus status, void* userdata));
+};
 
-        std::unique_ptr<StrictMock<MockBufferMapCallback>> mockBufferMapCallback;
-        void ToMockBufferMapCallback(WGPUBufferMapAsyncStatus status, void* userdata) {
-            mockBufferMapCallback->Call(status, userdata);
-        }
+std::unique_ptr<StrictMock<MockBufferMapCallback>> mockBufferMapCallback;
+void ToMockBufferMapCallback(WGPUBufferMapAsyncStatus status, void* userdata) {
+    mockBufferMapCallback->Call(status, userdata);
+}
 
-    }  // anonymous namespace
+}  // anonymous namespace
 
-    class WireBufferMappingTests : public WireTest {
-      public:
-        WireBufferMappingTests() {
-        }
-        ~WireBufferMappingTests() override = default;
+class WireBufferMappingTests : public WireTest {
+  public:
+    WireBufferMappingTests() {}
+    ~WireBufferMappingTests() override = default;
 
-        void SetUp() override {
-            WireTest::SetUp();
+    void SetUp() override {
+        WireTest::SetUp();
 
-            mockBufferMapCallback = std::make_unique<StrictMock<MockBufferMapCallback>>();
-            apiBuffer = api.GetNewBuffer();
-        }
-
-        void TearDown() override {
-            WireTest::TearDown();
-
-            // Delete mock so that expectations are checked
-            mockBufferMapCallback = nullptr;
-        }
-
-        void FlushClient() {
-            WireTest::FlushClient();
-            Mock::VerifyAndClearExpectations(&mockBufferMapCallback);
-        }
-
-        void FlushServer() {
-            WireTest::FlushServer();
-            Mock::VerifyAndClearExpectations(&mockBufferMapCallback);
-        }
-
-        void SetupBuffer(WGPUBufferUsageFlags usage) {
-            WGPUBufferDescriptor descriptor = {};
-            descriptor.size = kBufferSize;
-            descriptor.usage = usage;
-
-            buffer = wgpuDeviceCreateBuffer(device, &descriptor);
-
-            EXPECT_CALL(api, DeviceCreateBuffer(apiDevice, _))
-                .WillOnce(Return(apiBuffer))
-                .RetiresOnSaturation();
-            FlushClient();
-        }
-
-      protected:
-        static constexpr uint64_t kBufferSize = sizeof(uint32_t);
-        // A successfully created buffer
-        WGPUBuffer buffer;
-        WGPUBuffer apiBuffer;
-    };
-
-    // Tests specific to mapping for reading
-    class WireBufferMappingReadTests : public WireBufferMappingTests {
-      public:
-        WireBufferMappingReadTests() {
-        }
-        ~WireBufferMappingReadTests() override = default;
-
-        void SetUp() override {
-            WireBufferMappingTests::SetUp();
-
-            SetupBuffer(WGPUBufferUsage_MapRead);
-        }
-    };
-
-    // Check mapping for reading a succesfully created buffer
-    TEST_F(WireBufferMappingReadTests, MappingForReadSuccessBuffer) {
-        wgpuBufferMapAsync(buffer, WGPUMapMode_Read, 0, kBufferSize, ToMockBufferMapCallback,
-                           nullptr);
-
-        uint32_t bufferContent = 31337;
-        EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Read, 0, kBufferSize, _, _))
-            .WillOnce(InvokeWithoutArgs([&]() {
-                api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Success);
-            }));
-        EXPECT_CALL(api, BufferGetConstMappedRange(apiBuffer, 0, kBufferSize))
-            .WillOnce(Return(&bufferContent));
-
-        FlushClient();
-
-        EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_Success, _)).Times(1);
-
-        FlushServer();
-
-        EXPECT_EQ(bufferContent, *static_cast<const uint32_t*>(
-                                     wgpuBufferGetConstMappedRange(buffer, 0, kBufferSize)));
-
-        wgpuBufferUnmap(buffer);
-        EXPECT_CALL(api, BufferUnmap(apiBuffer)).Times(1);
-
-        FlushClient();
+        mockBufferMapCallback = std::make_unique<StrictMock<MockBufferMapCallback>>();
+        apiBuffer = api.GetNewBuffer();
     }
 
-    // Check that things work correctly when a validation error happens when mapping the buffer for
-    // reading
-    TEST_F(WireBufferMappingReadTests, ErrorWhileMappingForRead) {
-        wgpuBufferMapAsync(buffer, WGPUMapMode_Read, 0, kBufferSize, ToMockBufferMapCallback,
-                           nullptr);
+    void TearDown() override {
+        WireTest::TearDown();
 
-        EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Read, 0, kBufferSize, _, _))
-            .WillOnce(InvokeWithoutArgs([&]() {
-                api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Error);
-            }));
-
-        FlushClient();
-
-        EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_Error, _)).Times(1);
-
-        FlushServer();
-
-        EXPECT_EQ(nullptr, wgpuBufferGetConstMappedRange(buffer, 0, kBufferSize));
+        // Delete mock so that expectations are checked
+        mockBufferMapCallback = nullptr;
     }
 
-    // Check that the map read callback is called with UNKNOWN when the buffer is destroyed before
-    // the request is finished
-    TEST_F(WireBufferMappingReadTests, DestroyBeforeReadRequestEnd) {
-        wgpuBufferMapAsync(buffer, WGPUMapMode_Read, 0, kBufferSize, ToMockBufferMapCallback,
-                           nullptr);
-
-        // Return success
-        uint32_t bufferContent = 0;
-        EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Read, 0, kBufferSize, _, _))
-            .WillOnce(InvokeWithoutArgs([&]() {
-                api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Success);
-            }));
-        EXPECT_CALL(api, BufferGetConstMappedRange(apiBuffer, 0, kBufferSize))
-            .WillOnce(Return(&bufferContent));
-
-        // Destroy before the client gets the success, so the callback is called with
-        // DestroyedBeforeCallback.
-        EXPECT_CALL(*mockBufferMapCallback,
-                    Call(WGPUBufferMapAsyncStatus_DestroyedBeforeCallback, _))
-            .Times(1);
-        wgpuBufferRelease(buffer);
-        EXPECT_CALL(api, BufferRelease(apiBuffer));
-
-        FlushClient();
-        FlushServer();
+    void FlushClient() {
+        WireTest::FlushClient();
+        Mock::VerifyAndClearExpectations(&mockBufferMapCallback);
     }
 
-    // Check the map read callback is called with "UnmappedBeforeCallback" when the map request
-    // would have worked, but Unmap was called
-    TEST_F(WireBufferMappingReadTests, UnmapCalledTooEarlyForRead) {
-        wgpuBufferMapAsync(buffer, WGPUMapMode_Read, 0, kBufferSize, ToMockBufferMapCallback,
-                           nullptr);
-
-        uint32_t bufferContent = 31337;
-        EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Read, 0, kBufferSize, _, _))
-            .WillOnce(InvokeWithoutArgs([&]() {
-                api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Success);
-            }));
-        EXPECT_CALL(api, BufferGetConstMappedRange(apiBuffer, 0, kBufferSize))
-            .WillOnce(Return(&bufferContent));
-
-        // Oh no! We are calling Unmap too early! However the callback gets fired only after we get
-        // an answer from the server.
-        wgpuBufferUnmap(buffer);
-        EXPECT_CALL(api, BufferUnmap(apiBuffer));
-
-        FlushClient();
-
-        // The callback shouldn't get called with success, even when the request succeeded on the
-        // server side
-        EXPECT_CALL(*mockBufferMapCallback,
-                    Call(WGPUBufferMapAsyncStatus_UnmappedBeforeCallback, _))
-            .Times(1);
-
-        FlushServer();
+    void FlushServer() {
+        WireTest::FlushServer();
+        Mock::VerifyAndClearExpectations(&mockBufferMapCallback);
     }
 
-    // Check that even if Unmap() was called early client-side, we correctly surface server-side
-    // validation errors.
-    TEST_F(WireBufferMappingReadTests, UnmapCalledTooEarlyForReadButServerSideError) {
-        wgpuBufferMapAsync(buffer, WGPUMapMode_Read, 0, kBufferSize, ToMockBufferMapCallback,
-                           nullptr);
-
-        EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Read, 0, kBufferSize, _, _))
-            .WillOnce(InvokeWithoutArgs([&]() {
-                api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Error);
-            }));
-
-        // Oh no! We are calling Unmap too early! However the callback gets fired only after we get
-        // an answer from the server that the mapAsync call was an error.
-        wgpuBufferUnmap(buffer);
-        EXPECT_CALL(api, BufferUnmap(apiBuffer));
-
-        FlushClient();
-
-        // The callback should be called with the server-side error and not the
-        // UnmappedBeforeCallback.
-        EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_Error, _)).Times(1);
-
-        FlushServer();
-    }
-
-    // Check the map read callback is called with "DestroyedBeforeCallback" when the map request
-    // would have worked, but Destroy was called
-    TEST_F(WireBufferMappingReadTests, DestroyCalledTooEarlyForRead) {
-        wgpuBufferMapAsync(buffer, WGPUMapMode_Read, 0, kBufferSize, ToMockBufferMapCallback,
-                           nullptr);
-
-        uint32_t bufferContent = 31337;
-        EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Read, 0, kBufferSize, _, _))
-            .WillOnce(InvokeWithoutArgs([&]() {
-                api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Success);
-            }));
-        EXPECT_CALL(api, BufferGetConstMappedRange(apiBuffer, 0, kBufferSize))
-            .WillOnce(Return(&bufferContent));
-
-        // Oh no! We are calling Unmap too early! However the callback gets fired only after we get
-        // an answer from the server.
-        wgpuBufferDestroy(buffer);
-        EXPECT_CALL(api, BufferDestroy(apiBuffer));
-
-        FlushClient();
-
-        // The callback shouldn't get called with success, even when the request succeeded on the
-        // server side
-        EXPECT_CALL(*mockBufferMapCallback,
-                    Call(WGPUBufferMapAsyncStatus_DestroyedBeforeCallback, _))
-            .Times(1);
-
-        FlushServer();
-    }
-
-    // Check that even if Destroy() was called early client-side, we correctly surface server-side
-    // validation errors.
-    TEST_F(WireBufferMappingReadTests, DestroyCalledTooEarlyForReadButServerSideError) {
-        wgpuBufferMapAsync(buffer, WGPUMapMode_Read, 0, kBufferSize, ToMockBufferMapCallback,
-                           nullptr);
-
-        EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Read, 0, kBufferSize, _, _))
-            .WillOnce(InvokeWithoutArgs([&]() {
-                api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Error);
-            }));
-
-        // Oh no! We are calling Destroy too early! However the callback gets fired only after we
-        // get an answer from the server that the mapAsync call was an error.
-        wgpuBufferDestroy(buffer);
-        EXPECT_CALL(api, BufferDestroy(apiBuffer));
-
-        FlushClient();
-
-        // The callback should be called with the server-side error and not the
-        // DestroyedBeforCallback..
-        EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_Error, _)).Times(1);
-
-        FlushServer();
-    }
-
-    // Check that an error map read while a buffer is already mapped won't changed the result of get
-    // mapped range
-    TEST_F(WireBufferMappingReadTests, MappingForReadingErrorWhileAlreadyMappedUnchangeMapData) {
-        // Successful map
-        wgpuBufferMapAsync(buffer, WGPUMapMode_Read, 0, kBufferSize, ToMockBufferMapCallback,
-                           nullptr);
-
-        uint32_t bufferContent = 31337;
-        EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Read, 0, kBufferSize, _, _))
-            .WillOnce(InvokeWithoutArgs([&]() {
-                api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Success);
-            }));
-        EXPECT_CALL(api, BufferGetConstMappedRange(apiBuffer, 0, kBufferSize))
-            .WillOnce(Return(&bufferContent));
-
-        FlushClient();
-
-        EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_Success, _)).Times(1);
-
-        FlushServer();
-
-        // Map failure while the buffer is already mapped
-        wgpuBufferMapAsync(buffer, WGPUMapMode_Read, 0, kBufferSize, ToMockBufferMapCallback,
-                           nullptr);
-        EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Read, 0, kBufferSize, _, _))
-            .WillOnce(InvokeWithoutArgs([&]() {
-                api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Error);
-            }));
-
-        FlushClient();
-
-        EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_Error, _)).Times(1);
-
-        FlushServer();
-
-        EXPECT_EQ(bufferContent, *static_cast<const uint32_t*>(
-                                     wgpuBufferGetConstMappedRange(buffer, 0, kBufferSize)));
-    }
-
-    // Test that the MapReadCallback isn't fired twice when unmap() is called inside the callback
-    TEST_F(WireBufferMappingReadTests, UnmapInsideMapReadCallback) {
-        wgpuBufferMapAsync(buffer, WGPUMapMode_Read, 0, kBufferSize, ToMockBufferMapCallback,
-                           nullptr);
-
-        uint32_t bufferContent = 31337;
-        EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Read, 0, kBufferSize, _, _))
-            .WillOnce(InvokeWithoutArgs([&]() {
-                api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Success);
-            }));
-        EXPECT_CALL(api, BufferGetConstMappedRange(apiBuffer, 0, kBufferSize))
-            .WillOnce(Return(&bufferContent));
-
-        FlushClient();
-
-        EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_Success, _))
-            .WillOnce(InvokeWithoutArgs([&]() { wgpuBufferUnmap(buffer); }));
-
-        FlushServer();
-
-        EXPECT_CALL(api, BufferUnmap(apiBuffer)).Times(1);
-
-        FlushClient();
-    }
-
-    // Test that the MapReadCallback isn't fired twice the buffer external refcount reaches 0 in the
-    // callback
-    TEST_F(WireBufferMappingReadTests, DestroyInsideMapReadCallback) {
-        wgpuBufferMapAsync(buffer, WGPUMapMode_Read, 0, kBufferSize, ToMockBufferMapCallback,
-                           nullptr);
-
-        uint32_t bufferContent = 31337;
-        EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Read, 0, kBufferSize, _, _))
-            .WillOnce(InvokeWithoutArgs([&]() {
-                api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Success);
-            }));
-        EXPECT_CALL(api, BufferGetConstMappedRange(apiBuffer, 0, kBufferSize))
-            .WillOnce(Return(&bufferContent));
-
-        FlushClient();
-
-        EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_Success, _))
-            .WillOnce(InvokeWithoutArgs([&]() { wgpuBufferRelease(buffer); }));
-
-        FlushServer();
-
-        EXPECT_CALL(api, BufferRelease(apiBuffer));
-
-        FlushClient();
-    }
-
-    // Tests specific to mapping for writing
-    class WireBufferMappingWriteTests : public WireBufferMappingTests {
-      public:
-        WireBufferMappingWriteTests() {
-        }
-        ~WireBufferMappingWriteTests() override = default;
-
-        void SetUp() override {
-            WireBufferMappingTests::SetUp();
-
-            SetupBuffer(WGPUBufferUsage_MapWrite);
-        }
-    };
-
-    // Check mapping for writing a succesfully created buffer
-    TEST_F(WireBufferMappingWriteTests, MappingForWriteSuccessBuffer) {
-        wgpuBufferMapAsync(buffer, WGPUMapMode_Write, 0, kBufferSize, ToMockBufferMapCallback,
-                           nullptr);
-
-        uint32_t serverBufferContent = 31337;
-        uint32_t updatedContent = 4242;
-
-        EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Write, 0, kBufferSize, _, _))
-            .WillOnce(InvokeWithoutArgs([&]() {
-                api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Success);
-            }));
-        EXPECT_CALL(api, BufferGetMappedRange(apiBuffer, 0, kBufferSize))
-            .WillOnce(Return(&serverBufferContent));
-
-        FlushClient();
-
-        // The map write callback always gets a buffer full of zeroes.
-        EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_Success, _)).Times(1);
-
-        FlushServer();
-
-        uint32_t* lastMapWritePointer =
-            static_cast<uint32_t*>(wgpuBufferGetMappedRange(buffer, 0, kBufferSize));
-        ASSERT_EQ(0u, *lastMapWritePointer);
-
-        // Write something to the mapped pointer
-        *lastMapWritePointer = updatedContent;
-
-        wgpuBufferUnmap(buffer);
-        EXPECT_CALL(api, BufferUnmap(apiBuffer)).Times(1);
-
-        FlushClient();
-
-        // After the buffer is unmapped, the content of the buffer is updated on the server
-        ASSERT_EQ(serverBufferContent, updatedContent);
-    }
-
-    // Check that things work correctly when a validation error happens when mapping the buffer for
-    // writing
-    TEST_F(WireBufferMappingWriteTests, ErrorWhileMappingForWrite) {
-        wgpuBufferMapAsync(buffer, WGPUMapMode_Write, 0, kBufferSize, ToMockBufferMapCallback,
-                           nullptr);
-
-        EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Write, 0, kBufferSize, _, _))
-            .WillOnce(InvokeWithoutArgs([&]() {
-                api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Error);
-            }));
-
-        FlushClient();
-
-        EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_Error, _)).Times(1);
-
-        FlushServer();
-
-        EXPECT_EQ(nullptr, wgpuBufferGetMappedRange(buffer, 0, kBufferSize));
-    }
-
-    // Check that the map write callback is called with "DestroyedBeforeCallback" when the buffer is
-    // destroyed before the request is finished
-    TEST_F(WireBufferMappingWriteTests, DestroyBeforeWriteRequestEnd) {
-        wgpuBufferMapAsync(buffer, WGPUMapMode_Write, 0, kBufferSize, ToMockBufferMapCallback,
-                           nullptr);
-
-        // Return success
-        uint32_t bufferContent = 31337;
-        EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Write, 0, kBufferSize, _, _))
-            .WillOnce(InvokeWithoutArgs([&]() {
-                api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Success);
-            }));
-        EXPECT_CALL(api, BufferGetMappedRange(apiBuffer, 0, kBufferSize))
-            .WillOnce(Return(&bufferContent));
-
-        // Destroy before the client gets the success, so the callback is called with
-        // DestroyedBeforeCallback.
-        EXPECT_CALL(*mockBufferMapCallback,
-                    Call(WGPUBufferMapAsyncStatus_DestroyedBeforeCallback, _))
-            .Times(1);
-        wgpuBufferRelease(buffer);
-        EXPECT_CALL(api, BufferRelease(apiBuffer));
-
-        FlushClient();
-        FlushServer();
-    }
-
-    // Check the map write callback is called with "UnmappedBeforeCallback" when the map request
-    // would have worked, but Unmap was called
-    TEST_F(WireBufferMappingWriteTests, UnmapCalledTooEarlyForWrite) {
-        wgpuBufferMapAsync(buffer, WGPUMapMode_Write, 0, kBufferSize, ToMockBufferMapCallback,
-                           nullptr);
-
-        uint32_t bufferContent = 31337;
-        EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Write, 0, kBufferSize, _, _))
-            .WillOnce(InvokeWithoutArgs([&]() {
-                api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Success);
-            }));
-        EXPECT_CALL(api, BufferGetMappedRange(apiBuffer, 0, kBufferSize))
-            .WillOnce(Return(&bufferContent));
-
-        FlushClient();
-
-        // Oh no! We are calling Unmap too early!
-        EXPECT_CALL(*mockBufferMapCallback,
-                    Call(WGPUBufferMapAsyncStatus_UnmappedBeforeCallback, _))
-            .Times(1);
-        wgpuBufferUnmap(buffer);
-
-        // The callback shouldn't get called, even when the request succeeded on the server side
-        FlushServer();
-    }
-
-    // Check that an error map write while a buffer is already mapped
-    TEST_F(WireBufferMappingWriteTests, MappingForWritingErrorWhileAlreadyMapped) {
-        // Successful map
-        wgpuBufferMapAsync(buffer, WGPUMapMode_Write, 0, kBufferSize, ToMockBufferMapCallback,
-                           nullptr);
-
-        uint32_t bufferContent = 31337;
-        EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Write, 0, kBufferSize, _, _))
-            .WillOnce(InvokeWithoutArgs([&]() {
-                api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Success);
-            }));
-        EXPECT_CALL(api, BufferGetMappedRange(apiBuffer, 0, kBufferSize))
-            .WillOnce(Return(&bufferContent));
-
-        FlushClient();
-
-        EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_Success, _)).Times(1);
-
-        FlushServer();
-
-        // Map failure while the buffer is already mapped
-        wgpuBufferMapAsync(buffer, WGPUMapMode_Write, 0, kBufferSize, ToMockBufferMapCallback,
-                           nullptr);
-        EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Write, 0, kBufferSize, _, _))
-            .WillOnce(InvokeWithoutArgs([&]() {
-                api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Error);
-            }));
-
-        FlushClient();
-
-        EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_Error, _)).Times(1);
-
-        FlushServer();
-
-        EXPECT_NE(nullptr, static_cast<const uint32_t*>(
-                               wgpuBufferGetConstMappedRange(buffer, 0, kBufferSize)));
-    }
-
-    // Test that the MapWriteCallback isn't fired twice when unmap() is called inside the callback
-    TEST_F(WireBufferMappingWriteTests, UnmapInsideMapWriteCallback) {
-        wgpuBufferMapAsync(buffer, WGPUMapMode_Write, 0, kBufferSize, ToMockBufferMapCallback,
-                           nullptr);
-
-        uint32_t bufferContent = 31337;
-        EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Write, 0, kBufferSize, _, _))
-            .WillOnce(InvokeWithoutArgs([&]() {
-                api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Success);
-            }));
-        EXPECT_CALL(api, BufferGetMappedRange(apiBuffer, 0, kBufferSize))
-            .WillOnce(Return(&bufferContent));
-
-        FlushClient();
-
-        EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_Success, _))
-            .WillOnce(InvokeWithoutArgs([&]() { wgpuBufferUnmap(buffer); }));
-
-        FlushServer();
-
-        EXPECT_CALL(api, BufferUnmap(apiBuffer)).Times(1);
-
-        FlushClient();
-    }
-
-    // Test that the MapWriteCallback isn't fired twice the buffer external refcount reaches 0 in
-    // the callback
-    TEST_F(WireBufferMappingWriteTests, DestroyInsideMapWriteCallback) {
-        wgpuBufferMapAsync(buffer, WGPUMapMode_Write, 0, kBufferSize, ToMockBufferMapCallback,
-                           nullptr);
-
-        uint32_t bufferContent = 31337;
-        EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Write, 0, kBufferSize, _, _))
-            .WillOnce(InvokeWithoutArgs([&]() {
-                api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Success);
-            }));
-        EXPECT_CALL(api, BufferGetMappedRange(apiBuffer, 0, kBufferSize))
-            .WillOnce(Return(&bufferContent));
-
-        FlushClient();
-
-        EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_Success, _))
-            .WillOnce(InvokeWithoutArgs([&]() { wgpuBufferRelease(buffer); }));
-
-        FlushServer();
-
-        EXPECT_CALL(api, BufferRelease(apiBuffer));
-
-        FlushClient();
-    }
-
-    // Test successful buffer creation with mappedAtCreation=true
-    TEST_F(WireBufferMappingTests, MappedAtCreationSuccess) {
+    void SetupBuffer(WGPUBufferUsageFlags usage) {
         WGPUBufferDescriptor descriptor = {};
-        descriptor.size = 4;
+        descriptor.size = kBufferSize;
+        descriptor.usage = usage;
+
+        buffer = wgpuDeviceCreateBuffer(device, &descriptor);
+
+        EXPECT_CALL(api, DeviceCreateBuffer(apiDevice, _))
+            .WillOnce(Return(apiBuffer))
+            .RetiresOnSaturation();
+        FlushClient();
+    }
+
+  protected:
+    static constexpr uint64_t kBufferSize = sizeof(uint32_t);
+    // A successfully created buffer
+    WGPUBuffer buffer;
+    WGPUBuffer apiBuffer;
+};
+
+// Tests specific to mapping for reading
+class WireBufferMappingReadTests : public WireBufferMappingTests {
+  public:
+    WireBufferMappingReadTests() {}
+    ~WireBufferMappingReadTests() override = default;
+
+    void SetUp() override {
+        WireBufferMappingTests::SetUp();
+
+        SetupBuffer(WGPUBufferUsage_MapRead);
+    }
+};
+
+// Check mapping for reading a successfully created buffer
+TEST_F(WireBufferMappingReadTests, MappingForReadSuccessBuffer) {
+    wgpuBufferMapAsync(buffer, WGPUMapMode_Read, 0, kBufferSize, ToMockBufferMapCallback, nullptr);
+
+    uint32_t bufferContent = 31337;
+    EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Read, 0, kBufferSize, _, _))
+        .WillOnce(InvokeWithoutArgs([&]() {
+            api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Success);
+        }));
+    EXPECT_CALL(api, BufferGetConstMappedRange(apiBuffer, 0, kBufferSize))
+        .WillOnce(Return(&bufferContent));
+
+    FlushClient();
+
+    EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_Success, _)).Times(1);
+
+    FlushServer();
+
+    EXPECT_EQ(bufferContent,
+              *static_cast<const uint32_t*>(wgpuBufferGetConstMappedRange(buffer, 0, kBufferSize)));
+
+    wgpuBufferUnmap(buffer);
+    EXPECT_CALL(api, BufferUnmap(apiBuffer)).Times(1);
+
+    FlushClient();
+}
+
+// Check that things work correctly when a validation error happens when mapping the buffer for
+// reading
+TEST_F(WireBufferMappingReadTests, ErrorWhileMappingForRead) {
+    wgpuBufferMapAsync(buffer, WGPUMapMode_Read, 0, kBufferSize, ToMockBufferMapCallback, nullptr);
+
+    EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Read, 0, kBufferSize, _, _))
+        .WillOnce(InvokeWithoutArgs(
+            [&]() { api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Error); }));
+
+    FlushClient();
+
+    EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_Error, _)).Times(1);
+
+    FlushServer();
+
+    EXPECT_EQ(nullptr, wgpuBufferGetConstMappedRange(buffer, 0, kBufferSize));
+}
+
+// Check that the map read callback is called with "DestroyedBeforeCallback" when the buffer is
+// destroyed before the request is finished
+TEST_F(WireBufferMappingReadTests, DestroyBeforeReadRequestEnd) {
+    wgpuBufferMapAsync(buffer, WGPUMapMode_Read, 0, kBufferSize, ToMockBufferMapCallback, nullptr);
+
+    // Return success
+    uint32_t bufferContent = 0;
+    EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Read, 0, kBufferSize, _, _))
+        .WillOnce(InvokeWithoutArgs([&]() {
+            api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Success);
+        }));
+    EXPECT_CALL(api, BufferGetConstMappedRange(apiBuffer, 0, kBufferSize))
+        .WillOnce(Return(&bufferContent));
+
+    // Destroy before the client gets the success, so the callback is called with
+    // DestroyedBeforeCallback.
+    EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_DestroyedBeforeCallback, _))
+        .Times(1);
+    wgpuBufferRelease(buffer);
+    EXPECT_CALL(api, BufferRelease(apiBuffer));
+
+    FlushClient();
+    FlushServer();
+}
+
+// Check the map read callback is called with "UnmappedBeforeCallback" when the map request
+// would have worked, but Unmap was called
+TEST_F(WireBufferMappingReadTests, UnmapCalledTooEarlyForRead) {
+    wgpuBufferMapAsync(buffer, WGPUMapMode_Read, 0, kBufferSize, ToMockBufferMapCallback, nullptr);
+
+    uint32_t bufferContent = 31337;
+    EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Read, 0, kBufferSize, _, _))
+        .WillOnce(InvokeWithoutArgs([&]() {
+            api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Success);
+        }));
+    EXPECT_CALL(api, BufferGetConstMappedRange(apiBuffer, 0, kBufferSize))
+        .WillOnce(Return(&bufferContent));
+
+    // Oh no! We are calling Unmap too early! However the callback gets fired only after we get
+    // an answer from the server.
+    wgpuBufferUnmap(buffer);
+    EXPECT_CALL(api, BufferUnmap(apiBuffer));
+
+    FlushClient();
+
+    // The callback shouldn't get called with success, even when the request succeeded on the
+    // server side
+    EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_UnmappedBeforeCallback, _))
+        .Times(1);
+
+    FlushServer();
+}
+
+// Check that even if Unmap() was called early client-side, we correctly surface server-side
+// validation errors.
+TEST_F(WireBufferMappingReadTests, UnmapCalledTooEarlyForReadButServerSideError) {
+    wgpuBufferMapAsync(buffer, WGPUMapMode_Read, 0, kBufferSize, ToMockBufferMapCallback, nullptr);
+
+    EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Read, 0, kBufferSize, _, _))
+        .WillOnce(InvokeWithoutArgs(
+            [&]() { api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Error); }));
+
+    // Oh no! We are calling Unmap too early! However the callback gets fired only after we get
+    // an answer from the server that the mapAsync call was an error.
+    wgpuBufferUnmap(buffer);
+    EXPECT_CALL(api, BufferUnmap(apiBuffer));
+
+    FlushClient();
+
+    // The callback should be called with the server-side error and not the
+    // UnmappedBeforeCallback.
+    EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_Error, _)).Times(1);
+
+    FlushServer();
+}
+
+// Check the map read callback is called with "DestroyedBeforeCallback" when the map request
+// would have worked, but Destroy was called
+TEST_F(WireBufferMappingReadTests, DestroyCalledTooEarlyForRead) {
+    wgpuBufferMapAsync(buffer, WGPUMapMode_Read, 0, kBufferSize, ToMockBufferMapCallback, nullptr);
+
+    uint32_t bufferContent = 31337;
+    EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Read, 0, kBufferSize, _, _))
+        .WillOnce(InvokeWithoutArgs([&]() {
+            api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Success);
+        }));
+    EXPECT_CALL(api, BufferGetConstMappedRange(apiBuffer, 0, kBufferSize))
+        .WillOnce(Return(&bufferContent));
+
+    // Oh no! We are calling Destroy too early! However the callback gets fired only after we get
+    // an answer from the server.
+    wgpuBufferDestroy(buffer);
+    EXPECT_CALL(api, BufferDestroy(apiBuffer));
+
+    FlushClient();
+
+    // The callback shouldn't get called with success, even when the request succeeded on the
+    // server side
+    EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_DestroyedBeforeCallback, _))
+        .Times(1);
+
+    FlushServer();
+}
+
+// Check that even if Destroy() was called early client-side, we correctly surface server-side
+// validation errors.
+TEST_F(WireBufferMappingReadTests, DestroyCalledTooEarlyForReadButServerSideError) {
+    wgpuBufferMapAsync(buffer, WGPUMapMode_Read, 0, kBufferSize, ToMockBufferMapCallback, nullptr);
+
+    EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Read, 0, kBufferSize, _, _))
+        .WillOnce(InvokeWithoutArgs(
+            [&]() { api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Error); }));
+
+    // Oh no! We are calling Destroy too early! However the callback gets fired only after we
+    // get an answer from the server that the mapAsync call was an error.
+    wgpuBufferDestroy(buffer);
+    EXPECT_CALL(api, BufferDestroy(apiBuffer));
+
+    FlushClient();
+
+    // The callback should be called with the server-side error and not the
+    // DestroyedBeforeCallback.
+    EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_Error, _)).Times(1);
+
+    FlushServer();
+}
+
+// Check that an error map read while a buffer is already mapped won't change the result of get
+// mapped range
+TEST_F(WireBufferMappingReadTests, MappingForReadingErrorWhileAlreadyMappedUnchangeMapData) {
+    // Successful map
+    wgpuBufferMapAsync(buffer, WGPUMapMode_Read, 0, kBufferSize, ToMockBufferMapCallback, nullptr);
+
+    uint32_t bufferContent = 31337;
+    EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Read, 0, kBufferSize, _, _))
+        .WillOnce(InvokeWithoutArgs([&]() {
+            api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Success);
+        }));
+    EXPECT_CALL(api, BufferGetConstMappedRange(apiBuffer, 0, kBufferSize))
+        .WillOnce(Return(&bufferContent));
+
+    FlushClient();
+
+    EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_Success, _)).Times(1);
+
+    FlushServer();
+
+    // Map failure while the buffer is already mapped
+    wgpuBufferMapAsync(buffer, WGPUMapMode_Read, 0, kBufferSize, ToMockBufferMapCallback, nullptr);
+    EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Read, 0, kBufferSize, _, _))
+        .WillOnce(InvokeWithoutArgs(
+            [&]() { api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Error); }));
+
+    FlushClient();
+
+    EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_Error, _)).Times(1);
+
+    FlushServer();
+
+    EXPECT_EQ(bufferContent,
+              *static_cast<const uint32_t*>(wgpuBufferGetConstMappedRange(buffer, 0, kBufferSize)));
+}
+
+// Test that the MapReadCallback isn't fired twice when unmap() is called inside the callback
+TEST_F(WireBufferMappingReadTests, UnmapInsideMapReadCallback) {
+    wgpuBufferMapAsync(buffer, WGPUMapMode_Read, 0, kBufferSize, ToMockBufferMapCallback, nullptr);
+
+    uint32_t bufferContent = 31337;
+    EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Read, 0, kBufferSize, _, _))
+        .WillOnce(InvokeWithoutArgs([&]() {
+            api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Success);
+        }));
+    EXPECT_CALL(api, BufferGetConstMappedRange(apiBuffer, 0, kBufferSize))
+        .WillOnce(Return(&bufferContent));
+
+    FlushClient();
+
+    EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_Success, _))
+        .WillOnce(InvokeWithoutArgs([&]() { wgpuBufferUnmap(buffer); }));
+
+    FlushServer();
+
+    EXPECT_CALL(api, BufferUnmap(apiBuffer)).Times(1);
+
+    FlushClient();
+}
+
+// Test that the MapReadCallback isn't fired twice when the buffer external refcount reaches 0
+// in the callback
+TEST_F(WireBufferMappingReadTests, DestroyInsideMapReadCallback) {
+    wgpuBufferMapAsync(buffer, WGPUMapMode_Read, 0, kBufferSize, ToMockBufferMapCallback, nullptr);
+
+    uint32_t bufferContent = 31337;
+    EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Read, 0, kBufferSize, _, _))
+        .WillOnce(InvokeWithoutArgs([&]() {
+            api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Success);
+        }));
+    EXPECT_CALL(api, BufferGetConstMappedRange(apiBuffer, 0, kBufferSize))
+        .WillOnce(Return(&bufferContent));
+
+    FlushClient();
+
+    EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_Success, _))
+        .WillOnce(InvokeWithoutArgs([&]() { wgpuBufferRelease(buffer); }));
+
+    FlushServer();
+
+    EXPECT_CALL(api, BufferRelease(apiBuffer));
+
+    FlushClient();
+}
+
+// Tests specific to mapping for writing
+class WireBufferMappingWriteTests : public WireBufferMappingTests {
+  public:
+    WireBufferMappingWriteTests() {}
+    ~WireBufferMappingWriteTests() override = default;
+
+    void SetUp() override {
+        WireBufferMappingTests::SetUp();
+
+        SetupBuffer(WGPUBufferUsage_MapWrite);
+    }
+};
+
+// Check mapping for writing a successfully created buffer
+TEST_F(WireBufferMappingWriteTests, MappingForWriteSuccessBuffer) {
+    wgpuBufferMapAsync(buffer, WGPUMapMode_Write, 0, kBufferSize, ToMockBufferMapCallback, nullptr);
+
+    uint32_t serverBufferContent = 31337;
+    uint32_t updatedContent = 4242;
+
+    EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Write, 0, kBufferSize, _, _))
+        .WillOnce(InvokeWithoutArgs([&]() {
+            api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Success);
+        }));
+    EXPECT_CALL(api, BufferGetMappedRange(apiBuffer, 0, kBufferSize))
+        .WillOnce(Return(&serverBufferContent));
+
+    FlushClient();
+
+    // The map write callback always gets a buffer full of zeroes.
+    EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_Success, _)).Times(1);
+
+    FlushServer();
+
+    uint32_t* lastMapWritePointer =
+        static_cast<uint32_t*>(wgpuBufferGetMappedRange(buffer, 0, kBufferSize));
+    ASSERT_EQ(0u, *lastMapWritePointer);
+
+    // Write something to the mapped pointer
+    *lastMapWritePointer = updatedContent;
+
+    wgpuBufferUnmap(buffer);
+    EXPECT_CALL(api, BufferUnmap(apiBuffer)).Times(1);
+
+    FlushClient();
+
+    // After the buffer is unmapped, the content of the buffer is updated on the server
+    ASSERT_EQ(serverBufferContent, updatedContent);
+}
+
+// Check that things work correctly when a validation error happens when mapping the buffer for
+// writing
+TEST_F(WireBufferMappingWriteTests, ErrorWhileMappingForWrite) {
+    wgpuBufferMapAsync(buffer, WGPUMapMode_Write, 0, kBufferSize, ToMockBufferMapCallback, nullptr);
+
+    EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Write, 0, kBufferSize, _, _))
+        .WillOnce(InvokeWithoutArgs(
+            [&]() { api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Error); }));
+
+    FlushClient();
+
+    EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_Error, _)).Times(1);
+
+    FlushServer();
+
+    EXPECT_EQ(nullptr, wgpuBufferGetMappedRange(buffer, 0, kBufferSize));
+}
+
+// Check that the map write callback is called with "DestroyedBeforeCallback" when the buffer is
+// destroyed before the request is finished
+TEST_F(WireBufferMappingWriteTests, DestroyBeforeWriteRequestEnd) {
+    wgpuBufferMapAsync(buffer, WGPUMapMode_Write, 0, kBufferSize, ToMockBufferMapCallback, nullptr);
+
+    // Return success
+    uint32_t bufferContent = 31337;
+    EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Write, 0, kBufferSize, _, _))
+        .WillOnce(InvokeWithoutArgs([&]() {
+            api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Success);
+        }));
+    EXPECT_CALL(api, BufferGetMappedRange(apiBuffer, 0, kBufferSize))
+        .WillOnce(Return(&bufferContent));
+
+    // Destroy before the client gets the success, so the callback is called with
+    // DestroyedBeforeCallback.
+    EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_DestroyedBeforeCallback, _))
+        .Times(1);
+    wgpuBufferRelease(buffer);
+    EXPECT_CALL(api, BufferRelease(apiBuffer));
+
+    FlushClient();
+    FlushServer();
+}
+
+// Check the map write callback is called with "UnmappedBeforeCallback" when the map request
+// would have worked, but Unmap was called
+TEST_F(WireBufferMappingWriteTests, UnmapCalledTooEarlyForWrite) {
+    wgpuBufferMapAsync(buffer, WGPUMapMode_Write, 0, kBufferSize, ToMockBufferMapCallback, nullptr);
+
+    uint32_t bufferContent = 31337;
+    EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Write, 0, kBufferSize, _, _))
+        .WillOnce(InvokeWithoutArgs([&]() {
+            api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Success);
+        }));
+    EXPECT_CALL(api, BufferGetMappedRange(apiBuffer, 0, kBufferSize))
+        .WillOnce(Return(&bufferContent));
+
+    FlushClient();
+
+    // Oh no! We are calling Unmap too early!
+    EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_UnmappedBeforeCallback, _))
+        .Times(1);
+    wgpuBufferUnmap(buffer);
+
+    // The callback shouldn't get called, even when the request succeeded on the server side
+    FlushServer();
+}
+
+// Check that an error map write while a buffer is already mapped
+TEST_F(WireBufferMappingWriteTests, MappingForWritingErrorWhileAlreadyMapped) {
+    // Successful map
+    wgpuBufferMapAsync(buffer, WGPUMapMode_Write, 0, kBufferSize, ToMockBufferMapCallback, nullptr);
+
+    uint32_t bufferContent = 31337;
+    EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Write, 0, kBufferSize, _, _))
+        .WillOnce(InvokeWithoutArgs([&]() {
+            api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Success);
+        }));
+    EXPECT_CALL(api, BufferGetMappedRange(apiBuffer, 0, kBufferSize))
+        .WillOnce(Return(&bufferContent));
+
+    FlushClient();
+
+    EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_Success, _)).Times(1);
+
+    FlushServer();
+
+    // Map failure while the buffer is already mapped
+    wgpuBufferMapAsync(buffer, WGPUMapMode_Write, 0, kBufferSize, ToMockBufferMapCallback, nullptr);
+    EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Write, 0, kBufferSize, _, _))
+        .WillOnce(InvokeWithoutArgs(
+            [&]() { api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Error); }));
+
+    FlushClient();
+
+    EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_Error, _)).Times(1);
+
+    FlushServer();
+
+    EXPECT_NE(nullptr,
+              static_cast<const uint32_t*>(wgpuBufferGetConstMappedRange(buffer, 0, kBufferSize)));
+}
+
+// Test that the MapWriteCallback isn't fired twice when unmap() is called inside the callback
+TEST_F(WireBufferMappingWriteTests, UnmapInsideMapWriteCallback) {
+    wgpuBufferMapAsync(buffer, WGPUMapMode_Write, 0, kBufferSize, ToMockBufferMapCallback, nullptr);
+
+    uint32_t bufferContent = 31337;
+    EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Write, 0, kBufferSize, _, _))
+        .WillOnce(InvokeWithoutArgs([&]() {
+            api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Success);
+        }));
+    EXPECT_CALL(api, BufferGetMappedRange(apiBuffer, 0, kBufferSize))
+        .WillOnce(Return(&bufferContent));
+
+    FlushClient();
+
+    EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_Success, _))
+        .WillOnce(InvokeWithoutArgs([&]() { wgpuBufferUnmap(buffer); }));
+
+    FlushServer();
+
+    EXPECT_CALL(api, BufferUnmap(apiBuffer)).Times(1);
+
+    FlushClient();
+}
+
+// Test that the MapWriteCallback isn't fired twice when the buffer external refcount reaches 0
+// in the callback
+TEST_F(WireBufferMappingWriteTests, DestroyInsideMapWriteCallback) {
+    wgpuBufferMapAsync(buffer, WGPUMapMode_Write, 0, kBufferSize, ToMockBufferMapCallback, nullptr);
+
+    uint32_t bufferContent = 31337;
+    EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Write, 0, kBufferSize, _, _))
+        .WillOnce(InvokeWithoutArgs([&]() {
+            api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Success);
+        }));
+    EXPECT_CALL(api, BufferGetMappedRange(apiBuffer, 0, kBufferSize))
+        .WillOnce(Return(&bufferContent));
+
+    FlushClient();
+
+    EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_Success, _))
+        .WillOnce(InvokeWithoutArgs([&]() { wgpuBufferRelease(buffer); }));
+
+    FlushServer();
+
+    EXPECT_CALL(api, BufferRelease(apiBuffer));
+
+    FlushClient();
+}
+
+// Test successful buffer creation with mappedAtCreation=true
+TEST_F(WireBufferMappingTests, MappedAtCreationSuccess) {
+    WGPUBufferDescriptor descriptor = {};
+    descriptor.size = 4;
+    descriptor.mappedAtCreation = true;
+
+    WGPUBuffer apiBuffer = api.GetNewBuffer();
+    uint32_t apiBufferData = 1234;
+
+    WGPUBuffer buffer = wgpuDeviceCreateBuffer(device, &descriptor);
+
+    EXPECT_CALL(api, DeviceCreateBuffer(apiDevice, _)).WillOnce(Return(apiBuffer));
+    EXPECT_CALL(api, BufferGetMappedRange(apiBuffer, 0, 4)).WillOnce(Return(&apiBufferData));
+
+    FlushClient();
+
+    wgpuBufferUnmap(buffer);
+    EXPECT_CALL(api, BufferUnmap(apiBuffer)).Times(1);
+
+    FlushClient();
+}
+
+// Test that releasing a buffer mapped at creation does not call Unmap
+TEST_F(WireBufferMappingTests, MappedAtCreationReleaseBeforeUnmap) {
+    WGPUBufferDescriptor descriptor = {};
+    descriptor.size = 4;
+    descriptor.mappedAtCreation = true;
+
+    WGPUBuffer apiBuffer = api.GetNewBuffer();
+    uint32_t apiBufferData = 1234;
+
+    WGPUBuffer buffer = wgpuDeviceCreateBuffer(device, &descriptor);
+
+    EXPECT_CALL(api, DeviceCreateBuffer(apiDevice, _)).WillOnce(Return(apiBuffer));
+    EXPECT_CALL(api, BufferGetMappedRange(apiBuffer, 0, 4)).WillOnce(Return(&apiBufferData));
+
+    FlushClient();
+
+    wgpuBufferRelease(buffer);
+    EXPECT_CALL(api, BufferRelease(apiBuffer)).Times(1);
+
+    FlushClient();
+}
+
+// Test that it is valid to map a buffer after it is mapped at creation and unmapped
+TEST_F(WireBufferMappingTests, MappedAtCreationThenMapSuccess) {
+    WGPUBufferDescriptor descriptor = {};
+    descriptor.size = 4;
+    descriptor.usage = WGPUMapMode_Write;
+    descriptor.mappedAtCreation = true;
+
+    WGPUBuffer apiBuffer = api.GetNewBuffer();
+    uint32_t apiBufferData = 1234;
+
+    WGPUBuffer buffer = wgpuDeviceCreateBuffer(device, &descriptor);
+
+    EXPECT_CALL(api, DeviceCreateBuffer(apiDevice, _)).WillOnce(Return(apiBuffer));
+    EXPECT_CALL(api, BufferGetMappedRange(apiBuffer, 0, 4)).WillOnce(Return(&apiBufferData));
+
+    FlushClient();
+
+    wgpuBufferUnmap(buffer);
+    EXPECT_CALL(api, BufferUnmap(apiBuffer)).Times(1);
+
+    FlushClient();
+
+    wgpuBufferMapAsync(buffer, WGPUMapMode_Write, 0, kBufferSize, ToMockBufferMapCallback, nullptr);
+
+    EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Write, 0, kBufferSize, _, _))
+        .WillOnce(InvokeWithoutArgs([&]() {
+            api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Success);
+        }));
+    EXPECT_CALL(api, BufferGetMappedRange(apiBuffer, 0, kBufferSize))
+        .WillOnce(Return(&apiBufferData));
+
+    FlushClient();
+
+    EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_Success, _)).Times(1);
+
+    FlushServer();
+}
+
+// Test that it is invalid to map a buffer after mappedAtCreation but before Unmap
+TEST_F(WireBufferMappingTests, MappedAtCreationThenMapFailure) {
+    WGPUBufferDescriptor descriptor = {};
+    descriptor.size = 4;
+    descriptor.mappedAtCreation = true;
+
+    WGPUBuffer apiBuffer = api.GetNewBuffer();
+    uint32_t apiBufferData = 1234;
+
+    WGPUBuffer buffer = wgpuDeviceCreateBuffer(device, &descriptor);
+
+    EXPECT_CALL(api, DeviceCreateBuffer(apiDevice, _)).WillOnce(Return(apiBuffer));
+    EXPECT_CALL(api, BufferGetMappedRange(apiBuffer, 0, 4)).WillOnce(Return(&apiBufferData));
+
+    FlushClient();
+
+    wgpuBufferMapAsync(buffer, WGPUMapMode_Write, 0, kBufferSize, ToMockBufferMapCallback, nullptr);
+
+    EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Write, 0, kBufferSize, _, _))
+        .WillOnce(InvokeWithoutArgs(
+            [&]() { api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Error); }));
+
+    FlushClient();
+
+    EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_Error, _)).Times(1);
+
+    FlushServer();
+
+    EXPECT_NE(nullptr,
+              static_cast<const uint32_t*>(wgpuBufferGetConstMappedRange(buffer, 0, kBufferSize)));
+
+    wgpuBufferUnmap(buffer);
+    EXPECT_CALL(api, BufferUnmap(apiBuffer)).Times(1);
+
+    FlushClient();
+}
+
+// Check that trying to create a buffer of size MAX_SIZE_T is an error handled in the client
+// and never gets to the server-side.
+TEST_F(WireBufferMappingTests, MaxSizeMappableBufferOOMDirectly) {
+    size_t kOOMSize = std::numeric_limits<size_t>::max();
+    WGPUBuffer apiBuffer = api.GetNewBuffer();
+
+    // Check for CreateBufferMapped.
+    {
+        WGPUBufferDescriptor descriptor = {};
+        descriptor.usage = WGPUBufferUsage_CopySrc;
+        descriptor.size = kOOMSize;
         descriptor.mappedAtCreation = true;
 
-        WGPUBuffer apiBuffer = api.GetNewBuffer();
-        uint32_t apiBufferData = 1234;
-
-        WGPUBuffer buffer = wgpuDeviceCreateBuffer(device, &descriptor);
-
-        EXPECT_CALL(api, DeviceCreateBuffer(apiDevice, _)).WillOnce(Return(apiBuffer));
-        EXPECT_CALL(api, BufferGetMappedRange(apiBuffer, 0, 4)).WillOnce(Return(&apiBufferData));
-
-        FlushClient();
-
-        wgpuBufferUnmap(buffer);
-        EXPECT_CALL(api, BufferUnmap(apiBuffer)).Times(1);
-
+        wgpuDeviceCreateBuffer(device, &descriptor);
+        EXPECT_CALL(api, DeviceInjectError(apiDevice, WGPUErrorType_OutOfMemory, _));
+        EXPECT_CALL(api, DeviceCreateErrorBuffer(apiDevice)).WillOnce(Return(apiBuffer));
         FlushClient();
     }
 
-    // Test that releasing a buffer mapped at creation does not call Unmap
-    TEST_F(WireBufferMappingTests, MappedAtCreationReleaseBeforeUnmap) {
+    // Check for MapRead usage.
+    {
         WGPUBufferDescriptor descriptor = {};
-        descriptor.size = 4;
-        descriptor.mappedAtCreation = true;
+        descriptor.usage = WGPUBufferUsage_MapRead;
+        descriptor.size = kOOMSize;
 
-        WGPUBuffer apiBuffer = api.GetNewBuffer();
-        uint32_t apiBufferData = 1234;
-
-        WGPUBuffer buffer = wgpuDeviceCreateBuffer(device, &descriptor);
-
-        EXPECT_CALL(api, DeviceCreateBuffer(apiDevice, _)).WillOnce(Return(apiBuffer));
-        EXPECT_CALL(api, BufferGetMappedRange(apiBuffer, 0, 4)).WillOnce(Return(&apiBufferData));
-
-        FlushClient();
-
-        wgpuBufferRelease(buffer);
-        EXPECT_CALL(api, BufferRelease(apiBuffer)).Times(1);
-
+        wgpuDeviceCreateBuffer(device, &descriptor);
+        EXPECT_CALL(api, DeviceInjectError(apiDevice, WGPUErrorType_OutOfMemory, _));
+        EXPECT_CALL(api, DeviceCreateErrorBuffer(apiDevice)).WillOnce(Return(apiBuffer));
         FlushClient();
     }
 
-    // Test that it is valid to map a buffer after it is mapped at creation and unmapped
-    TEST_F(WireBufferMappingTests, MappedAtCreationThenMapSuccess) {
+    // Check for MapWrite usage.
+    {
         WGPUBufferDescriptor descriptor = {};
-        descriptor.size = 4;
-        descriptor.usage = WGPUMapMode_Write;
-        descriptor.mappedAtCreation = true;
+        descriptor.usage = WGPUBufferUsage_MapWrite;
+        descriptor.size = kOOMSize;
 
-        WGPUBuffer apiBuffer = api.GetNewBuffer();
-        uint32_t apiBufferData = 1234;
-
-        WGPUBuffer buffer = wgpuDeviceCreateBuffer(device, &descriptor);
-
-        EXPECT_CALL(api, DeviceCreateBuffer(apiDevice, _)).WillOnce(Return(apiBuffer));
-        EXPECT_CALL(api, BufferGetMappedRange(apiBuffer, 0, 4)).WillOnce(Return(&apiBufferData));
-
-        FlushClient();
-
-        wgpuBufferUnmap(buffer);
-        EXPECT_CALL(api, BufferUnmap(apiBuffer)).Times(1);
-
-        FlushClient();
-
-        wgpuBufferMapAsync(buffer, WGPUMapMode_Write, 0, kBufferSize, ToMockBufferMapCallback,
-                           nullptr);
-
-        EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Write, 0, kBufferSize, _, _))
-            .WillOnce(InvokeWithoutArgs([&]() {
-                api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Success);
-            }));
-        EXPECT_CALL(api, BufferGetMappedRange(apiBuffer, 0, kBufferSize))
-            .WillOnce(Return(&apiBufferData));
-
-        FlushClient();
-
-        EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_Success, _)).Times(1);
-
-        FlushServer();
-    }
-
-    // Test that it is invalid to map a buffer after mappedAtCreation but before Unmap
-    TEST_F(WireBufferMappingTests, MappedAtCreationThenMapFailure) {
-        WGPUBufferDescriptor descriptor = {};
-        descriptor.size = 4;
-        descriptor.mappedAtCreation = true;
-
-        WGPUBuffer apiBuffer = api.GetNewBuffer();
-        uint32_t apiBufferData = 1234;
-
-        WGPUBuffer buffer = wgpuDeviceCreateBuffer(device, &descriptor);
-
-        EXPECT_CALL(api, DeviceCreateBuffer(apiDevice, _)).WillOnce(Return(apiBuffer));
-        EXPECT_CALL(api, BufferGetMappedRange(apiBuffer, 0, 4)).WillOnce(Return(&apiBufferData));
-
-        FlushClient();
-
-        wgpuBufferMapAsync(buffer, WGPUMapMode_Write, 0, kBufferSize, ToMockBufferMapCallback,
-                           nullptr);
-
-        EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Write, 0, kBufferSize, _, _))
-            .WillOnce(InvokeWithoutArgs([&]() {
-                api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Error);
-            }));
-
-        FlushClient();
-
-        EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_Error, _)).Times(1);
-
-        FlushServer();
-
-        EXPECT_NE(nullptr, static_cast<const uint32_t*>(
-                               wgpuBufferGetConstMappedRange(buffer, 0, kBufferSize)));
-
-        wgpuBufferUnmap(buffer);
-        EXPECT_CALL(api, BufferUnmap(apiBuffer)).Times(1);
-
+        wgpuDeviceCreateBuffer(device, &descriptor);
+        EXPECT_CALL(api, DeviceInjectError(apiDevice, WGPUErrorType_OutOfMemory, _));
+        EXPECT_CALL(api, DeviceCreateErrorBuffer(apiDevice)).WillOnce(Return(apiBuffer));
         FlushClient();
     }
+}
 
-    // Check that trying to create a buffer of size MAX_SIZE_T is an error handling in the client
-    // and never gets to the server-side.
-    TEST_F(WireBufferMappingTests, MaxSizeMappableBufferOOMDirectly) {
-        size_t kOOMSize = std::numeric_limits<size_t>::max();
-        WGPUBuffer apiBuffer = api.GetNewBuffer();
+// Test that registering a callback then wire disconnect calls the callback with
+// DeviceLost.
+TEST_F(WireBufferMappingTests, MapThenDisconnect) {
+    SetupBuffer(WGPUMapMode_Write);
+    wgpuBufferMapAsync(buffer, WGPUMapMode_Write, 0, kBufferSize, ToMockBufferMapCallback, this);
 
-        // Check for CreateBufferMapped.
-        {
-            WGPUBufferDescriptor descriptor = {};
-            descriptor.usage = WGPUBufferUsage_CopySrc;
-            descriptor.size = kOOMSize;
-            descriptor.mappedAtCreation = true;
+    EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Write, 0, kBufferSize, _, _))
+        .WillOnce(InvokeWithoutArgs([&]() {
+            api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Success);
+        }));
+    EXPECT_CALL(api, BufferGetMappedRange(apiBuffer, 0, kBufferSize)).Times(1);
 
-            wgpuDeviceCreateBuffer(device, &descriptor);
-            EXPECT_CALL(api, DeviceInjectError(apiDevice, WGPUErrorType_OutOfMemory, _));
-            EXPECT_CALL(api, DeviceCreateErrorBuffer(apiDevice)).WillOnce(Return(apiBuffer));
-            FlushClient();
-        }
+    FlushClient();
 
-        // Check for MapRead usage.
-        {
-            WGPUBufferDescriptor descriptor = {};
-            descriptor.usage = WGPUBufferUsage_MapRead;
-            descriptor.size = kOOMSize;
+    EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_DeviceLost, this)).Times(1);
+    GetWireClient()->Disconnect();
+}
 
-            wgpuDeviceCreateBuffer(device, &descriptor);
-            EXPECT_CALL(api, DeviceInjectError(apiDevice, WGPUErrorType_OutOfMemory, _));
-            EXPECT_CALL(api, DeviceCreateErrorBuffer(apiDevice)).WillOnce(Return(apiBuffer));
-            FlushClient();
-        }
+// Test that registering a callback after wire disconnect calls the callback with
+// DeviceLost.
+TEST_F(WireBufferMappingTests, MapAfterDisconnect) {
+    SetupBuffer(WGPUMapMode_Read);
 
-        // Check for MapWrite usage.
-        {
-            WGPUBufferDescriptor descriptor = {};
-            descriptor.usage = WGPUBufferUsage_MapWrite;
-            descriptor.size = kOOMSize;
+    GetWireClient()->Disconnect();
 
-            wgpuDeviceCreateBuffer(device, &descriptor);
-            EXPECT_CALL(api, DeviceInjectError(apiDevice, WGPUErrorType_OutOfMemory, _));
-            EXPECT_CALL(api, DeviceCreateErrorBuffer(apiDevice)).WillOnce(Return(apiBuffer));
-            FlushClient();
-        }
+    EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_DeviceLost, this)).Times(1);
+    wgpuBufferMapAsync(buffer, WGPUMapMode_Read, 0, kBufferSize, ToMockBufferMapCallback, this);
+}
+
+// Hack to pass in test context into user callback
+struct TestData {
+    WireBufferMappingTests* pTest;
+    WGPUBuffer* pTestBuffer;
+    size_t numRequests;
+};
+
+static void ToMockBufferMapCallbackWithNewRequests(WGPUBufferMapAsyncStatus status,
+                                                   void* userdata) {
+    TestData* testData = reinterpret_cast<TestData*>(userdata);
+    // Mimic the user callback is sending new requests
+    ASSERT_NE(testData, nullptr);
+    ASSERT_NE(testData->pTest, nullptr);
+    ASSERT_NE(testData->pTestBuffer, nullptr);
+
+    mockBufferMapCallback->Call(status, testData->pTest);
+
+    // Send the requests a number of times
+    for (size_t i = 0; i < testData->numRequests; i++) {
+        wgpuBufferMapAsync(*(testData->pTestBuffer), WGPUMapMode_Write, 0, sizeof(uint32_t),
+                           ToMockBufferMapCallback, testData->pTest);
     }
+}
 
-    // Test that registering a callback then wire disconnect calls the callback with
-    // DeviceLost.
-    TEST_F(WireBufferMappingTests, MapThenDisconnect) {
-        SetupBuffer(WGPUMapMode_Write);
-        wgpuBufferMapAsync(buffer, WGPUMapMode_Write, 0, kBufferSize, ToMockBufferMapCallback,
-                           this);
+// Test that requests inside user callbacks before disconnect are called
+TEST_F(WireBufferMappingTests, MapInsideCallbackBeforeDisconnect) {
+    SetupBuffer(WGPUMapMode_Write);
+    TestData testData = {this, &buffer, 10};
+    wgpuBufferMapAsync(buffer, WGPUMapMode_Write, 0, kBufferSize,
+                       ToMockBufferMapCallbackWithNewRequests, &testData);
 
-        EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Write, 0, kBufferSize, _, _))
-            .WillOnce(InvokeWithoutArgs([&]() {
-                api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Success);
-            }));
-        EXPECT_CALL(api, BufferGetMappedRange(apiBuffer, 0, kBufferSize)).Times(1);
+    EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Write, 0, kBufferSize, _, _))
+        .WillOnce(InvokeWithoutArgs([&]() {
+            api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Success);
+        }));
+    EXPECT_CALL(api, BufferGetMappedRange(apiBuffer, 0, kBufferSize)).Times(1);
 
-        FlushClient();
+    FlushClient();
 
-        EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_DeviceLost, this))
-            .Times(1);
-        GetWireClient()->Disconnect();
-    }
+    EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_DeviceLost, this))
+        .Times(1 + testData.numRequests);
+    GetWireClient()->Disconnect();
+}
 
-    // Test that registering a callback after wire disconnect calls the callback with
-    // DeviceLost.
-    TEST_F(WireBufferMappingTests, MapAfterDisconnect) {
-        SetupBuffer(WGPUMapMode_Read);
+// Test that requests inside user callbacks before object destruction are called
+TEST_F(WireBufferMappingWriteTests, MapInsideCallbackBeforeDestruction) {
+    TestData testData = {this, &buffer, 10};
+    wgpuBufferMapAsync(buffer, WGPUMapMode_Write, 0, kBufferSize,
+                       ToMockBufferMapCallbackWithNewRequests, &testData);
 
-        GetWireClient()->Disconnect();
+    EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Write, 0, kBufferSize, _, _))
+        .WillOnce(InvokeWithoutArgs([&]() {
+            api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Success);
+        }));
+    EXPECT_CALL(api, BufferGetMappedRange(apiBuffer, 0, kBufferSize)).Times(1);
 
-        EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_DeviceLost, this))
-            .Times(1);
-        wgpuBufferMapAsync(buffer, WGPUMapMode_Read, 0, kBufferSize, ToMockBufferMapCallback, this);
-    }
+    FlushClient();
 
-    // Hack to pass in test context into user callback
-    struct TestData {
-        WireBufferMappingTests* pTest;
-        WGPUBuffer* pTestBuffer;
-        size_t numRequests;
-    };
-
-    static void ToMockBufferMapCallbackWithNewRequests(WGPUBufferMapAsyncStatus status,
-                                                       void* userdata) {
-        TestData* testData = reinterpret_cast<TestData*>(userdata);
-        // Mimic the user callback is sending new requests
-        ASSERT_NE(testData, nullptr);
-        ASSERT_NE(testData->pTest, nullptr);
-        ASSERT_NE(testData->pTestBuffer, nullptr);
-
-        mockBufferMapCallback->Call(status, testData->pTest);
-
-        // Send the requests a number of times
-        for (size_t i = 0; i < testData->numRequests; i++) {
-            wgpuBufferMapAsync(*(testData->pTestBuffer), WGPUMapMode_Write, 0, sizeof(uint32_t),
-                               ToMockBufferMapCallback, testData->pTest);
-        }
-    }
-
-    // Test that requests inside user callbacks before disconnect are called
-    TEST_F(WireBufferMappingTests, MapInsideCallbackBeforeDisconnect) {
-        SetupBuffer(WGPUMapMode_Write);
-        TestData testData = {this, &buffer, 10};
-        wgpuBufferMapAsync(buffer, WGPUMapMode_Write, 0, kBufferSize,
-                           ToMockBufferMapCallbackWithNewRequests, &testData);
-
-        EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Write, 0, kBufferSize, _, _))
-            .WillOnce(InvokeWithoutArgs([&]() {
-                api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Success);
-            }));
-        EXPECT_CALL(api, BufferGetMappedRange(apiBuffer, 0, kBufferSize)).Times(1);
-
-        FlushClient();
-
-        EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_DeviceLost, this))
-            .Times(1 + testData.numRequests);
-        GetWireClient()->Disconnect();
-    }
-
-    // Test that requests inside user callbacks before object destruction are called
-    TEST_F(WireBufferMappingWriteTests, MapInsideCallbackBeforeDestruction) {
-        TestData testData = {this, &buffer, 10};
-        wgpuBufferMapAsync(buffer, WGPUMapMode_Write, 0, kBufferSize,
-                           ToMockBufferMapCallbackWithNewRequests, &testData);
-
-        EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Write, 0, kBufferSize, _, _))
-            .WillOnce(InvokeWithoutArgs([&]() {
-                api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Success);
-            }));
-        EXPECT_CALL(api, BufferGetMappedRange(apiBuffer, 0, kBufferSize)).Times(1);
-
-        FlushClient();
-
-        EXPECT_CALL(*mockBufferMapCallback,
-                    Call(WGPUBufferMapAsyncStatus_DestroyedBeforeCallback, this))
-            .Times(1 + testData.numRequests);
-        wgpuBufferRelease(buffer);
-    }
+    EXPECT_CALL(*mockBufferMapCallback,
+                Call(WGPUBufferMapAsyncStatus_DestroyedBeforeCallback, this))
+        .Times(1 + testData.numRequests);
+    wgpuBufferRelease(buffer);
+}
 
 }  // namespace dawn::wire
diff --git a/src/dawn/tests/unittests/wire/WireCreatePipelineAsyncTests.cpp b/src/dawn/tests/unittests/wire/WireCreatePipelineAsyncTests.cpp
index e031890..4d7f7cd 100644
--- a/src/dawn/tests/unittests/wire/WireCreatePipelineAsyncTests.cpp
+++ b/src/dawn/tests/unittests/wire/WireCreatePipelineAsyncTests.cpp
@@ -18,368 +18,368 @@
 #include "dawn/wire/WireClient.h"
 
 namespace dawn::wire {
-    namespace {
+namespace {
 
-        using testing::_;
-        using testing::InvokeWithoutArgs;
-        using testing::Mock;
-        using testing::Return;
-        using testing::Sequence;
-        using testing::StrEq;
-        using testing::StrictMock;
+using testing::_;
+using testing::InvokeWithoutArgs;
+using testing::Mock;
+using testing::Return;
+using testing::Sequence;
+using testing::StrEq;
+using testing::StrictMock;
 
-        // Mock class to add expectations on the wire calling callbacks
-        class MockCreateComputePipelineAsyncCallback {
-          public:
-            MOCK_METHOD(void,
-                        Call,
-                        (WGPUCreatePipelineAsyncStatus status,
-                         WGPUComputePipeline pipeline,
-                         const char* message,
-                         void* userdata));
-        };
+// Mock class to add expectations on the wire calling callbacks
+class MockCreateComputePipelineAsyncCallback {
+  public:
+    MOCK_METHOD(void,
+                Call,
+                (WGPUCreatePipelineAsyncStatus status,
+                 WGPUComputePipeline pipeline,
+                 const char* message,
+                 void* userdata));
+};
 
-        std::unique_ptr<StrictMock<MockCreateComputePipelineAsyncCallback>>
-            mockCreateComputePipelineAsyncCallback;
-        void ToMockCreateComputePipelineAsyncCallback(WGPUCreatePipelineAsyncStatus status,
-                                                      WGPUComputePipeline pipeline,
-                                                      const char* message,
-                                                      void* userdata) {
-            mockCreateComputePipelineAsyncCallback->Call(status, pipeline, message, userdata);
-        }
+std::unique_ptr<StrictMock<MockCreateComputePipelineAsyncCallback>>
+    mockCreateComputePipelineAsyncCallback;
+void ToMockCreateComputePipelineAsyncCallback(WGPUCreatePipelineAsyncStatus status,
+                                              WGPUComputePipeline pipeline,
+                                              const char* message,
+                                              void* userdata) {
+    mockCreateComputePipelineAsyncCallback->Call(status, pipeline, message, userdata);
+}
 
-        class MockCreateRenderPipelineAsyncCallback {
-          public:
-            MOCK_METHOD(void,
-                        Call,
-                        (WGPUCreatePipelineAsyncStatus status,
-                         WGPURenderPipeline pipeline,
-                         const char* message,
-                         void* userdata));
-        };
+class MockCreateRenderPipelineAsyncCallback {
+  public:
+    MOCK_METHOD(void,
+                Call,
+                (WGPUCreatePipelineAsyncStatus status,
+                 WGPURenderPipeline pipeline,
+                 const char* message,
+                 void* userdata));
+};
 
-        std::unique_ptr<StrictMock<MockCreateRenderPipelineAsyncCallback>>
-            mockCreateRenderPipelineAsyncCallback;
-        void ToMockCreateRenderPipelineAsyncCallback(WGPUCreatePipelineAsyncStatus status,
-                                                     WGPURenderPipeline pipeline,
-                                                     const char* message,
-                                                     void* userdata) {
-            mockCreateRenderPipelineAsyncCallback->Call(status, pipeline, message, userdata);
-        }
+std::unique_ptr<StrictMock<MockCreateRenderPipelineAsyncCallback>>
+    mockCreateRenderPipelineAsyncCallback;
+void ToMockCreateRenderPipelineAsyncCallback(WGPUCreatePipelineAsyncStatus status,
+                                             WGPURenderPipeline pipeline,
+                                             const char* message,
+                                             void* userdata) {
+    mockCreateRenderPipelineAsyncCallback->Call(status, pipeline, message, userdata);
+}
 
-    }  // anonymous namespace
+}  // anonymous namespace
 
-    class WireCreatePipelineAsyncTest : public WireTest {
-      public:
-        void SetUp() override {
-            WireTest::SetUp();
+class WireCreatePipelineAsyncTest : public WireTest {
+  public:
+    void SetUp() override {
+        WireTest::SetUp();
 
-            mockCreateComputePipelineAsyncCallback =
-                std::make_unique<StrictMock<MockCreateComputePipelineAsyncCallback>>();
-            mockCreateRenderPipelineAsyncCallback =
-                std::make_unique<StrictMock<MockCreateRenderPipelineAsyncCallback>>();
-        }
-
-        void TearDown() override {
-            WireTest::TearDown();
-
-            // Delete mock so that expectations are checked
-            mockCreateComputePipelineAsyncCallback = nullptr;
-            mockCreateRenderPipelineAsyncCallback = nullptr;
-        }
-
-        void FlushClient() {
-            WireTest::FlushClient();
-            Mock::VerifyAndClearExpectations(&mockCreateComputePipelineAsyncCallback);
-        }
-
-        void FlushServer() {
-            WireTest::FlushServer();
-            Mock::VerifyAndClearExpectations(&mockCreateComputePipelineAsyncCallback);
-        }
-    };
-
-    // Test when creating a compute pipeline with CreateComputePipelineAsync() successfully.
-    TEST_F(WireCreatePipelineAsyncTest, CreateComputePipelineAsyncSuccess) {
-        WGPUShaderModuleDescriptor csDescriptor{};
-        WGPUShaderModule csModule = wgpuDeviceCreateShaderModule(device, &csDescriptor);
-        WGPUShaderModule apiCsModule = api.GetNewShaderModule();
-        EXPECT_CALL(api, DeviceCreateShaderModule(apiDevice, _)).WillOnce(Return(apiCsModule));
-
-        WGPUComputePipelineDescriptor descriptor{};
-        descriptor.compute.module = csModule;
-        descriptor.compute.entryPoint = "main";
-
-        wgpuDeviceCreateComputePipelineAsync(device, &descriptor,
-                                             ToMockCreateComputePipelineAsyncCallback, this);
-
-        EXPECT_CALL(api, OnDeviceCreateComputePipelineAsync(apiDevice, _, _, _))
-            .WillOnce(InvokeWithoutArgs([&]() {
-                api.CallDeviceCreateComputePipelineAsyncCallback(
-                    apiDevice, WGPUCreatePipelineAsyncStatus_Success, nullptr, "");
-            }));
-
-        FlushClient();
-
-        EXPECT_CALL(*mockCreateComputePipelineAsyncCallback,
-                    Call(WGPUCreatePipelineAsyncStatus_Success, _, StrEq(""), this))
-            .Times(1);
-
-        FlushServer();
+        mockCreateComputePipelineAsyncCallback =
+            std::make_unique<StrictMock<MockCreateComputePipelineAsyncCallback>>();
+        mockCreateRenderPipelineAsyncCallback =
+            std::make_unique<StrictMock<MockCreateRenderPipelineAsyncCallback>>();
     }
 
-    // Test when creating a compute pipeline with CreateComputePipelineAsync() results in an error.
-    TEST_F(WireCreatePipelineAsyncTest, CreateComputePipelineAsyncError) {
-        WGPUShaderModuleDescriptor csDescriptor{};
-        WGPUShaderModule csModule = wgpuDeviceCreateShaderModule(device, &csDescriptor);
-        WGPUShaderModule apiCsModule = api.GetNewShaderModule();
-        EXPECT_CALL(api, DeviceCreateShaderModule(apiDevice, _)).WillOnce(Return(apiCsModule));
+    void TearDown() override {
+        WireTest::TearDown();
 
-        WGPUComputePipelineDescriptor descriptor{};
-        descriptor.compute.module = csModule;
-        descriptor.compute.entryPoint = "main";
-
-        wgpuDeviceCreateComputePipelineAsync(device, &descriptor,
-                                             ToMockCreateComputePipelineAsyncCallback, this);
-
-        EXPECT_CALL(api, OnDeviceCreateComputePipelineAsync(apiDevice, _, _, _))
-            .WillOnce(InvokeWithoutArgs([&]() {
-                api.CallDeviceCreateComputePipelineAsyncCallback(
-                    apiDevice, WGPUCreatePipelineAsyncStatus_Error, nullptr, "Some error message");
-            }));
-
-        FlushClient();
-
-        EXPECT_CALL(*mockCreateComputePipelineAsyncCallback,
-                    Call(WGPUCreatePipelineAsyncStatus_Error, _, StrEq("Some error message"), this))
-            .Times(1);
-
-        FlushServer();
+        // Delete mock so that expectations are checked
+        mockCreateComputePipelineAsyncCallback = nullptr;
+        mockCreateRenderPipelineAsyncCallback = nullptr;
     }
 
-    // Test when creating a render pipeline with CreateRenderPipelineAsync() successfully.
-    TEST_F(WireCreatePipelineAsyncTest, CreateRenderPipelineAsyncSuccess) {
-        WGPUShaderModuleDescriptor vertexDescriptor = {};
-        WGPUShaderModule vsModule = wgpuDeviceCreateShaderModule(device, &vertexDescriptor);
-        WGPUShaderModule apiVsModule = api.GetNewShaderModule();
-        EXPECT_CALL(api, DeviceCreateShaderModule(apiDevice, _)).WillOnce(Return(apiVsModule));
-
-        WGPURenderPipelineDescriptor pipelineDescriptor{};
-        pipelineDescriptor.vertex.module = vsModule;
-        pipelineDescriptor.vertex.entryPoint = "main";
-
-        WGPUFragmentState fragment = {};
-        fragment.module = vsModule;
-        fragment.entryPoint = "main";
-        pipelineDescriptor.fragment = &fragment;
-
-        wgpuDeviceCreateRenderPipelineAsync(device, &pipelineDescriptor,
-                                            ToMockCreateRenderPipelineAsyncCallback, this);
-        EXPECT_CALL(api, OnDeviceCreateRenderPipelineAsync(apiDevice, _, _, _))
-            .WillOnce(InvokeWithoutArgs([&]() {
-                api.CallDeviceCreateRenderPipelineAsyncCallback(
-                    apiDevice, WGPUCreatePipelineAsyncStatus_Success, nullptr, "");
-            }));
-
-        FlushClient();
-
-        EXPECT_CALL(*mockCreateRenderPipelineAsyncCallback,
-                    Call(WGPUCreatePipelineAsyncStatus_Success, _, StrEq(""), this))
-            .Times(1);
-
-        FlushServer();
+    void FlushClient() {
+        WireTest::FlushClient();
+        Mock::VerifyAndClearExpectations(&mockCreateComputePipelineAsyncCallback);
     }
 
-    // Test when creating a render pipeline with CreateRenderPipelineAsync() results in an error.
-    TEST_F(WireCreatePipelineAsyncTest, CreateRenderPipelineAsyncError) {
-        WGPUShaderModuleDescriptor vertexDescriptor = {};
-        WGPUShaderModule vsModule = wgpuDeviceCreateShaderModule(device, &vertexDescriptor);
-        WGPUShaderModule apiVsModule = api.GetNewShaderModule();
-        EXPECT_CALL(api, DeviceCreateShaderModule(apiDevice, _)).WillOnce(Return(apiVsModule));
-
-        WGPURenderPipelineDescriptor pipelineDescriptor{};
-        pipelineDescriptor.vertex.module = vsModule;
-        pipelineDescriptor.vertex.entryPoint = "main";
-
-        WGPUFragmentState fragment = {};
-        fragment.module = vsModule;
-        fragment.entryPoint = "main";
-        pipelineDescriptor.fragment = &fragment;
-
-        wgpuDeviceCreateRenderPipelineAsync(device, &pipelineDescriptor,
-                                            ToMockCreateRenderPipelineAsyncCallback, this);
-        EXPECT_CALL(api, OnDeviceCreateRenderPipelineAsync(apiDevice, _, _, _))
-            .WillOnce(InvokeWithoutArgs([&]() {
-                api.CallDeviceCreateRenderPipelineAsyncCallback(
-                    apiDevice, WGPUCreatePipelineAsyncStatus_Error, nullptr, "Some error message");
-            }));
-
-        FlushClient();
-
-        EXPECT_CALL(*mockCreateRenderPipelineAsyncCallback,
-                    Call(WGPUCreatePipelineAsyncStatus_Error, _, StrEq("Some error message"), this))
-            .Times(1);
-
-        FlushServer();
+    void FlushServer() {
+        WireTest::FlushServer();
+        Mock::VerifyAndClearExpectations(&mockCreateComputePipelineAsyncCallback);
     }
+};
 
-    // Test that registering a callback then wire disconnect calls the callback with
-    // DeviceLost.
-    TEST_F(WireCreatePipelineAsyncTest, CreateRenderPipelineAsyncThenDisconnect) {
-        WGPUShaderModuleDescriptor vertexDescriptor = {};
-        WGPUShaderModule vsModule = wgpuDeviceCreateShaderModule(device, &vertexDescriptor);
-        WGPUShaderModule apiVsModule = api.GetNewShaderModule();
-        EXPECT_CALL(api, DeviceCreateShaderModule(apiDevice, _)).WillOnce(Return(apiVsModule));
+// Test when creating a compute pipeline with CreateComputePipelineAsync() successfully.
+TEST_F(WireCreatePipelineAsyncTest, CreateComputePipelineAsyncSuccess) {
+    WGPUShaderModuleDescriptor csDescriptor{};
+    WGPUShaderModule csModule = wgpuDeviceCreateShaderModule(device, &csDescriptor);
+    WGPUShaderModule apiCsModule = api.GetNewShaderModule();
+    EXPECT_CALL(api, DeviceCreateShaderModule(apiDevice, _)).WillOnce(Return(apiCsModule));
 
-        WGPUFragmentState fragment = {};
-        fragment.module = vsModule;
-        fragment.entryPoint = "main";
+    WGPUComputePipelineDescriptor descriptor{};
+    descriptor.compute.module = csModule;
+    descriptor.compute.entryPoint = "main";
 
-        WGPURenderPipelineDescriptor pipelineDescriptor{};
-        pipelineDescriptor.vertex.module = vsModule;
-        pipelineDescriptor.vertex.entryPoint = "main";
-        pipelineDescriptor.fragment = &fragment;
+    wgpuDeviceCreateComputePipelineAsync(device, &descriptor,
+                                         ToMockCreateComputePipelineAsyncCallback, this);
 
-        wgpuDeviceCreateRenderPipelineAsync(device, &pipelineDescriptor,
-                                            ToMockCreateRenderPipelineAsyncCallback, this);
-        EXPECT_CALL(api, OnDeviceCreateRenderPipelineAsync(apiDevice, _, _, _))
-            .WillOnce(InvokeWithoutArgs([&]() {
-                api.CallDeviceCreateRenderPipelineAsyncCallback(
-                    apiDevice, WGPUCreatePipelineAsyncStatus_Success, nullptr, "");
-            }));
+    EXPECT_CALL(api, OnDeviceCreateComputePipelineAsync(apiDevice, _, _, _))
+        .WillOnce(InvokeWithoutArgs([&]() {
+            api.CallDeviceCreateComputePipelineAsyncCallback(
+                apiDevice, WGPUCreatePipelineAsyncStatus_Success, nullptr, "");
+        }));
 
-        FlushClient();
+    FlushClient();
 
-        EXPECT_CALL(*mockCreateRenderPipelineAsyncCallback,
-                    Call(WGPUCreatePipelineAsyncStatus_DeviceLost, _, _, this))
-            .Times(1);
-        GetWireClient()->Disconnect();
-    }
+    EXPECT_CALL(*mockCreateComputePipelineAsyncCallback,
+                Call(WGPUCreatePipelineAsyncStatus_Success, _, StrEq(""), this))
+        .Times(1);
 
-    // Test that registering a callback then wire disconnect calls the callback with
-    // DeviceLost.
-    TEST_F(WireCreatePipelineAsyncTest, CreateComputePipelineAsyncThenDisconnect) {
-        WGPUShaderModuleDescriptor csDescriptor{};
-        WGPUShaderModule csModule = wgpuDeviceCreateShaderModule(device, &csDescriptor);
-        WGPUShaderModule apiCsModule = api.GetNewShaderModule();
-        EXPECT_CALL(api, DeviceCreateShaderModule(apiDevice, _)).WillOnce(Return(apiCsModule));
+    FlushServer();
+}
 
-        WGPUComputePipelineDescriptor descriptor{};
-        descriptor.compute.module = csModule;
-        descriptor.compute.entryPoint = "main";
+// Test when creating a compute pipeline with CreateComputePipelineAsync() results in an error.
+TEST_F(WireCreatePipelineAsyncTest, CreateComputePipelineAsyncError) {
+    WGPUShaderModuleDescriptor csDescriptor{};
+    WGPUShaderModule csModule = wgpuDeviceCreateShaderModule(device, &csDescriptor);
+    WGPUShaderModule apiCsModule = api.GetNewShaderModule();
+    EXPECT_CALL(api, DeviceCreateShaderModule(apiDevice, _)).WillOnce(Return(apiCsModule));
 
-        wgpuDeviceCreateComputePipelineAsync(device, &descriptor,
-                                             ToMockCreateComputePipelineAsyncCallback, this);
-        EXPECT_CALL(api, OnDeviceCreateComputePipelineAsync(apiDevice, _, _, _))
-            .WillOnce(InvokeWithoutArgs([&]() {
-                api.CallDeviceCreateComputePipelineAsyncCallback(
-                    apiDevice, WGPUCreatePipelineAsyncStatus_Success, nullptr, "");
-            }));
+    WGPUComputePipelineDescriptor descriptor{};
+    descriptor.compute.module = csModule;
+    descriptor.compute.entryPoint = "main";
 
-        FlushClient();
+    wgpuDeviceCreateComputePipelineAsync(device, &descriptor,
+                                         ToMockCreateComputePipelineAsyncCallback, this);
 
-        EXPECT_CALL(*mockCreateComputePipelineAsyncCallback,
-                    Call(WGPUCreatePipelineAsyncStatus_DeviceLost, _, _, this))
-            .Times(1);
-        GetWireClient()->Disconnect();
-    }
+    EXPECT_CALL(api, OnDeviceCreateComputePipelineAsync(apiDevice, _, _, _))
+        .WillOnce(InvokeWithoutArgs([&]() {
+            api.CallDeviceCreateComputePipelineAsyncCallback(
+                apiDevice, WGPUCreatePipelineAsyncStatus_Error, nullptr, "Some error message");
+        }));
 
-    // Test that registering a callback after wire disconnect calls the callback with
-    // DeviceLost.
-    TEST_F(WireCreatePipelineAsyncTest, CreateRenderPipelineAsyncAfterDisconnect) {
-        WGPUShaderModuleDescriptor vertexDescriptor = {};
-        WGPUShaderModule vsModule = wgpuDeviceCreateShaderModule(device, &vertexDescriptor);
-        WGPUShaderModule apiVsModule = api.GetNewShaderModule();
-        EXPECT_CALL(api, DeviceCreateShaderModule(apiDevice, _)).WillOnce(Return(apiVsModule));
+    FlushClient();
 
-        WGPUFragmentState fragment = {};
-        fragment.module = vsModule;
-        fragment.entryPoint = "main";
+    EXPECT_CALL(*mockCreateComputePipelineAsyncCallback,
+                Call(WGPUCreatePipelineAsyncStatus_Error, _, StrEq("Some error message"), this))
+        .Times(1);
 
-        WGPURenderPipelineDescriptor pipelineDescriptor{};
-        pipelineDescriptor.vertex.module = vsModule;
-        pipelineDescriptor.vertex.entryPoint = "main";
-        pipelineDescriptor.fragment = &fragment;
+    FlushServer();
+}
 
-        FlushClient();
+// Test when creating a render pipeline with CreateRenderPipelineAsync() successfully.
+TEST_F(WireCreatePipelineAsyncTest, CreateRenderPipelineAsyncSuccess) {
+    WGPUShaderModuleDescriptor vertexDescriptor = {};
+    WGPUShaderModule vsModule = wgpuDeviceCreateShaderModule(device, &vertexDescriptor);
+    WGPUShaderModule apiVsModule = api.GetNewShaderModule();
+    EXPECT_CALL(api, DeviceCreateShaderModule(apiDevice, _)).WillOnce(Return(apiVsModule));
 
-        GetWireClient()->Disconnect();
+    WGPURenderPipelineDescriptor pipelineDescriptor{};
+    pipelineDescriptor.vertex.module = vsModule;
+    pipelineDescriptor.vertex.entryPoint = "main";
 
-        EXPECT_CALL(*mockCreateRenderPipelineAsyncCallback,
-                    Call(WGPUCreatePipelineAsyncStatus_DeviceLost, nullptr, _, this))
-            .Times(1);
-        wgpuDeviceCreateRenderPipelineAsync(device, &pipelineDescriptor,
-                                            ToMockCreateRenderPipelineAsyncCallback, this);
-    }
+    WGPUFragmentState fragment = {};
+    fragment.module = vsModule;
+    fragment.entryPoint = "main";
+    pipelineDescriptor.fragment = &fragment;
 
-    // Test that registering a callback after wire disconnect calls the callback with
-    // DeviceLost.
-    TEST_F(WireCreatePipelineAsyncTest, CreateComputePipelineAsyncAfterDisconnect) {
-        WGPUShaderModuleDescriptor csDescriptor{};
-        WGPUShaderModule csModule = wgpuDeviceCreateShaderModule(device, &csDescriptor);
-        WGPUShaderModule apiCsModule = api.GetNewShaderModule();
-        EXPECT_CALL(api, DeviceCreateShaderModule(apiDevice, _)).WillOnce(Return(apiCsModule));
+    wgpuDeviceCreateRenderPipelineAsync(device, &pipelineDescriptor,
+                                        ToMockCreateRenderPipelineAsyncCallback, this);
+    EXPECT_CALL(api, OnDeviceCreateRenderPipelineAsync(apiDevice, _, _, _))
+        .WillOnce(InvokeWithoutArgs([&]() {
+            api.CallDeviceCreateRenderPipelineAsyncCallback(
+                apiDevice, WGPUCreatePipelineAsyncStatus_Success, nullptr, "");
+        }));
 
-        WGPUComputePipelineDescriptor descriptor{};
-        descriptor.compute.module = csModule;
-        descriptor.compute.entryPoint = "main";
+    FlushClient();
 
-        FlushClient();
+    EXPECT_CALL(*mockCreateRenderPipelineAsyncCallback,
+                Call(WGPUCreatePipelineAsyncStatus_Success, _, StrEq(""), this))
+        .Times(1);
 
-        GetWireClient()->Disconnect();
+    FlushServer();
+}
 
-        EXPECT_CALL(*mockCreateComputePipelineAsyncCallback,
-                    Call(WGPUCreatePipelineAsyncStatus_DeviceLost, nullptr, _, this))
-            .Times(1);
+// Test when creating a render pipeline with CreateRenderPipelineAsync() results in an error.
+TEST_F(WireCreatePipelineAsyncTest, CreateRenderPipelineAsyncError) {
+    WGPUShaderModuleDescriptor vertexDescriptor = {};
+    WGPUShaderModule vsModule = wgpuDeviceCreateShaderModule(device, &vertexDescriptor);
+    WGPUShaderModule apiVsModule = api.GetNewShaderModule();
+    EXPECT_CALL(api, DeviceCreateShaderModule(apiDevice, _)).WillOnce(Return(apiVsModule));
 
-        wgpuDeviceCreateComputePipelineAsync(device, &descriptor,
-                                             ToMockCreateComputePipelineAsyncCallback, this);
-    }
+    WGPURenderPipelineDescriptor pipelineDescriptor{};
+    pipelineDescriptor.vertex.module = vsModule;
+    pipelineDescriptor.vertex.entryPoint = "main";
 
-    TEST_F(WireCreatePipelineAsyncTest, DeviceDeletedBeforeCallback) {
-        WGPUShaderModuleDescriptor vertexDescriptor = {};
-        WGPUShaderModule module = wgpuDeviceCreateShaderModule(device, &vertexDescriptor);
-        WGPUShaderModule apiModule = api.GetNewShaderModule();
-        EXPECT_CALL(api, DeviceCreateShaderModule(apiDevice, _)).WillOnce(Return(apiModule));
+    WGPUFragmentState fragment = {};
+    fragment.module = vsModule;
+    fragment.entryPoint = "main";
+    pipelineDescriptor.fragment = &fragment;
 
-        WGPURenderPipelineDescriptor pipelineDescriptor{};
-        pipelineDescriptor.vertex.module = module;
-        pipelineDescriptor.vertex.entryPoint = "main";
+    wgpuDeviceCreateRenderPipelineAsync(device, &pipelineDescriptor,
+                                        ToMockCreateRenderPipelineAsyncCallback, this);
+    EXPECT_CALL(api, OnDeviceCreateRenderPipelineAsync(apiDevice, _, _, _))
+        .WillOnce(InvokeWithoutArgs([&]() {
+            api.CallDeviceCreateRenderPipelineAsyncCallback(
+                apiDevice, WGPUCreatePipelineAsyncStatus_Error, nullptr, "Some error message");
+        }));
 
-        WGPUFragmentState fragment = {};
-        fragment.module = module;
-        fragment.entryPoint = "main";
-        pipelineDescriptor.fragment = &fragment;
+    FlushClient();
 
-        wgpuDeviceCreateRenderPipelineAsync(device, &pipelineDescriptor,
-                                            ToMockCreateRenderPipelineAsyncCallback, this);
+    EXPECT_CALL(*mockCreateRenderPipelineAsyncCallback,
+                Call(WGPUCreatePipelineAsyncStatus_Error, _, StrEq("Some error message"), this))
+        .Times(1);
 
-        EXPECT_CALL(api, OnDeviceCreateRenderPipelineAsync(apiDevice, _, _, _));
-        FlushClient();
+    FlushServer();
+}
 
-        EXPECT_CALL(*mockCreateRenderPipelineAsyncCallback,
-                    Call(WGPUCreatePipelineAsyncStatus_DeviceDestroyed, nullptr, _, this))
-            .Times(1);
+// Test that registering a callback then wire disconnect calls the callback with
+// DeviceLost.
+TEST_F(WireCreatePipelineAsyncTest, CreateRenderPipelineAsyncThenDisconnect) {
+    WGPUShaderModuleDescriptor vertexDescriptor = {};
+    WGPUShaderModule vsModule = wgpuDeviceCreateShaderModule(device, &vertexDescriptor);
+    WGPUShaderModule apiVsModule = api.GetNewShaderModule();
+    EXPECT_CALL(api, DeviceCreateShaderModule(apiDevice, _)).WillOnce(Return(apiVsModule));
 
-        wgpuDeviceRelease(device);
+    WGPUFragmentState fragment = {};
+    fragment.module = vsModule;
+    fragment.entryPoint = "main";
 
-        // Expect release on all objects created by the client.
-        Sequence s1, s2;
-        EXPECT_CALL(api, QueueRelease(apiQueue)).Times(1).InSequence(s1);
-        EXPECT_CALL(api, ShaderModuleRelease(apiModule)).Times(1).InSequence(s2);
-        EXPECT_CALL(api, OnDeviceSetUncapturedErrorCallback(apiDevice, nullptr, nullptr))
-            .Times(1)
-            .InSequence(s1, s2);
-        EXPECT_CALL(api, OnDeviceSetLoggingCallback(apiDevice, nullptr, nullptr))
-            .Times(1)
-            .InSequence(s1, s2);
-        EXPECT_CALL(api, OnDeviceSetDeviceLostCallback(apiDevice, nullptr, nullptr))
-            .Times(1)
-            .InSequence(s1, s2);
-        EXPECT_CALL(api, DeviceRelease(apiDevice)).Times(1).InSequence(s1, s2);
+    WGPURenderPipelineDescriptor pipelineDescriptor{};
+    pipelineDescriptor.vertex.module = vsModule;
+    pipelineDescriptor.vertex.entryPoint = "main";
+    pipelineDescriptor.fragment = &fragment;
 
-        FlushClient();
-        DefaultApiDeviceWasReleased();
-    }
+    wgpuDeviceCreateRenderPipelineAsync(device, &pipelineDescriptor,
+                                        ToMockCreateRenderPipelineAsyncCallback, this);
+    EXPECT_CALL(api, OnDeviceCreateRenderPipelineAsync(apiDevice, _, _, _))
+        .WillOnce(InvokeWithoutArgs([&]() {
+            api.CallDeviceCreateRenderPipelineAsyncCallback(
+                apiDevice, WGPUCreatePipelineAsyncStatus_Success, nullptr, "");
+        }));
+
+    FlushClient();
+
+    EXPECT_CALL(*mockCreateRenderPipelineAsyncCallback,
+                Call(WGPUCreatePipelineAsyncStatus_DeviceLost, _, _, this))
+        .Times(1);
+    GetWireClient()->Disconnect();
+}
+
+// Test that registering a callback then wire disconnect calls the callback with
+// DeviceLost.
+TEST_F(WireCreatePipelineAsyncTest, CreateComputePipelineAsyncThenDisconnect) {
+    WGPUShaderModuleDescriptor csDescriptor{};
+    WGPUShaderModule csModule = wgpuDeviceCreateShaderModule(device, &csDescriptor);
+    WGPUShaderModule apiCsModule = api.GetNewShaderModule();
+    EXPECT_CALL(api, DeviceCreateShaderModule(apiDevice, _)).WillOnce(Return(apiCsModule));
+
+    WGPUComputePipelineDescriptor descriptor{};
+    descriptor.compute.module = csModule;
+    descriptor.compute.entryPoint = "main";
+
+    wgpuDeviceCreateComputePipelineAsync(device, &descriptor,
+                                         ToMockCreateComputePipelineAsyncCallback, this);
+    EXPECT_CALL(api, OnDeviceCreateComputePipelineAsync(apiDevice, _, _, _))
+        .WillOnce(InvokeWithoutArgs([&]() {
+            api.CallDeviceCreateComputePipelineAsyncCallback(
+                apiDevice, WGPUCreatePipelineAsyncStatus_Success, nullptr, "");
+        }));
+
+    FlushClient();
+
+    EXPECT_CALL(*mockCreateComputePipelineAsyncCallback,
+                Call(WGPUCreatePipelineAsyncStatus_DeviceLost, _, _, this))
+        .Times(1);
+    GetWireClient()->Disconnect();
+}
+
+// Test that registering a callback after wire disconnect calls the callback with
+// DeviceLost.
+TEST_F(WireCreatePipelineAsyncTest, CreateRenderPipelineAsyncAfterDisconnect) {
+    WGPUShaderModuleDescriptor vertexDescriptor = {};
+    WGPUShaderModule vsModule = wgpuDeviceCreateShaderModule(device, &vertexDescriptor);
+    WGPUShaderModule apiVsModule = api.GetNewShaderModule();
+    EXPECT_CALL(api, DeviceCreateShaderModule(apiDevice, _)).WillOnce(Return(apiVsModule));
+
+    WGPUFragmentState fragment = {};
+    fragment.module = vsModule;
+    fragment.entryPoint = "main";
+
+    WGPURenderPipelineDescriptor pipelineDescriptor{};
+    pipelineDescriptor.vertex.module = vsModule;
+    pipelineDescriptor.vertex.entryPoint = "main";
+    pipelineDescriptor.fragment = &fragment;
+
+    FlushClient();
+
+    GetWireClient()->Disconnect();
+
+    EXPECT_CALL(*mockCreateRenderPipelineAsyncCallback,
+                Call(WGPUCreatePipelineAsyncStatus_DeviceLost, nullptr, _, this))
+        .Times(1);
+    wgpuDeviceCreateRenderPipelineAsync(device, &pipelineDescriptor,
+                                        ToMockCreateRenderPipelineAsyncCallback, this);
+}
+
+// Test that registering a callback after wire disconnect calls the callback with
+// DeviceLost.
+TEST_F(WireCreatePipelineAsyncTest, CreateComputePipelineAsyncAfterDisconnect) {
+    WGPUShaderModuleDescriptor csDescriptor{};
+    WGPUShaderModule csModule = wgpuDeviceCreateShaderModule(device, &csDescriptor);
+    WGPUShaderModule apiCsModule = api.GetNewShaderModule();
+    EXPECT_CALL(api, DeviceCreateShaderModule(apiDevice, _)).WillOnce(Return(apiCsModule));
+
+    WGPUComputePipelineDescriptor descriptor{};
+    descriptor.compute.module = csModule;
+    descriptor.compute.entryPoint = "main";
+
+    FlushClient();
+
+    GetWireClient()->Disconnect();
+
+    EXPECT_CALL(*mockCreateComputePipelineAsyncCallback,
+                Call(WGPUCreatePipelineAsyncStatus_DeviceLost, nullptr, _, this))
+        .Times(1);
+
+    wgpuDeviceCreateComputePipelineAsync(device, &descriptor,
+                                         ToMockCreateComputePipelineAsyncCallback, this);
+}
+
+TEST_F(WireCreatePipelineAsyncTest, DeviceDeletedBeforeCallback) {
+    WGPUShaderModuleDescriptor vertexDescriptor = {};
+    WGPUShaderModule module = wgpuDeviceCreateShaderModule(device, &vertexDescriptor);
+    WGPUShaderModule apiModule = api.GetNewShaderModule();
+    EXPECT_CALL(api, DeviceCreateShaderModule(apiDevice, _)).WillOnce(Return(apiModule));
+
+    WGPURenderPipelineDescriptor pipelineDescriptor{};
+    pipelineDescriptor.vertex.module = module;
+    pipelineDescriptor.vertex.entryPoint = "main";
+
+    WGPUFragmentState fragment = {};
+    fragment.module = module;
+    fragment.entryPoint = "main";
+    pipelineDescriptor.fragment = &fragment;
+
+    wgpuDeviceCreateRenderPipelineAsync(device, &pipelineDescriptor,
+                                        ToMockCreateRenderPipelineAsyncCallback, this);
+
+    EXPECT_CALL(api, OnDeviceCreateRenderPipelineAsync(apiDevice, _, _, _));
+    FlushClient();
+
+    EXPECT_CALL(*mockCreateRenderPipelineAsyncCallback,
+                Call(WGPUCreatePipelineAsyncStatus_DeviceDestroyed, nullptr, _, this))
+        .Times(1);
+
+    wgpuDeviceRelease(device);
+
+    // Expect release on all objects created by the client.
+    Sequence s1, s2;
+    EXPECT_CALL(api, QueueRelease(apiQueue)).Times(1).InSequence(s1);
+    EXPECT_CALL(api, ShaderModuleRelease(apiModule)).Times(1).InSequence(s2);
+    EXPECT_CALL(api, OnDeviceSetUncapturedErrorCallback(apiDevice, nullptr, nullptr))
+        .Times(1)
+        .InSequence(s1, s2);
+    EXPECT_CALL(api, OnDeviceSetLoggingCallback(apiDevice, nullptr, nullptr))
+        .Times(1)
+        .InSequence(s1, s2);
+    EXPECT_CALL(api, OnDeviceSetDeviceLostCallback(apiDevice, nullptr, nullptr))
+        .Times(1)
+        .InSequence(s1, s2);
+    EXPECT_CALL(api, DeviceRelease(apiDevice)).Times(1).InSequence(s1, s2);
+
+    FlushClient();
+    DefaultApiDeviceWasReleased();
+}
 
 }  // namespace dawn::wire
diff --git a/src/dawn/tests/unittests/wire/WireDestroyObjectTests.cpp b/src/dawn/tests/unittests/wire/WireDestroyObjectTests.cpp
index ad34939..4f2f947 100644
--- a/src/dawn/tests/unittests/wire/WireDestroyObjectTests.cpp
+++ b/src/dawn/tests/unittests/wire/WireDestroyObjectTests.cpp
@@ -17,47 +17,46 @@
 
 namespace dawn::wire {
 
-    using testing::Return;
-    using testing::Sequence;
+using testing::Return;
+using testing::Sequence;
 
-    class WireDestroyObjectTests : public WireTest {};
+class WireDestroyObjectTests : public WireTest {};
 
-    // Test that destroying the device also destroys child objects.
-    TEST_F(WireDestroyObjectTests, DestroyDeviceDestroysChildren) {
-        WGPUCommandEncoder encoder = wgpuDeviceCreateCommandEncoder(device, nullptr);
+// Test that destroying the device also destroys child objects.
+TEST_F(WireDestroyObjectTests, DestroyDeviceDestroysChildren) {
+    WGPUCommandEncoder encoder = wgpuDeviceCreateCommandEncoder(device, nullptr);
 
-        WGPUCommandEncoder apiEncoder = api.GetNewCommandEncoder();
-        EXPECT_CALL(api, DeviceCreateCommandEncoder(apiDevice, nullptr))
-            .WillOnce(Return(apiEncoder));
+    WGPUCommandEncoder apiEncoder = api.GetNewCommandEncoder();
+    EXPECT_CALL(api, DeviceCreateCommandEncoder(apiDevice, nullptr)).WillOnce(Return(apiEncoder));
 
-        FlushClient();
+    FlushClient();
 
-        // Release the device. It should cause the command encoder to be destroyed.
-        wgpuDeviceRelease(device);
+    // Release the device. It should cause the command encoder to be destroyed.
+    wgpuDeviceRelease(device);
 
-        Sequence s1, s2;
-        // The device and child objects should be released.
-        EXPECT_CALL(api, CommandEncoderRelease(apiEncoder)).InSequence(s1);
-        EXPECT_CALL(api, QueueRelease(apiQueue)).InSequence(s2);
-        EXPECT_CALL(api, OnDeviceSetUncapturedErrorCallback(apiDevice, nullptr, nullptr))
-            .Times(1)
-            .InSequence(s1, s2);
-        EXPECT_CALL(api, OnDeviceSetLoggingCallback(apiDevice, nullptr, nullptr))
-            .Times(1)
-            .InSequence(s1, s2);
-        EXPECT_CALL(api, OnDeviceSetDeviceLostCallback(apiDevice, nullptr, nullptr))
-            .Times(1)
-            .InSequence(s1, s2);
-        EXPECT_CALL(api, DeviceRelease(apiDevice)).InSequence(s1, s2);
+    Sequence s1, s2;
+    // The device and child objects should be released.
+    EXPECT_CALL(api, CommandEncoderRelease(apiEncoder)).InSequence(s1);
+    EXPECT_CALL(api, QueueRelease(apiQueue)).InSequence(s2);
+    EXPECT_CALL(api, OnDeviceSetUncapturedErrorCallback(apiDevice, nullptr, nullptr))
+        .Times(1)
+        .InSequence(s1, s2);
+    EXPECT_CALL(api, OnDeviceSetLoggingCallback(apiDevice, nullptr, nullptr))
+        .Times(1)
+        .InSequence(s1, s2);
+    EXPECT_CALL(api, OnDeviceSetDeviceLostCallback(apiDevice, nullptr, nullptr))
+        .Times(1)
+        .InSequence(s1, s2);
+    EXPECT_CALL(api, DeviceRelease(apiDevice)).InSequence(s1, s2);
 
-        FlushClient();
+    FlushClient();
 
-        // Signal that we already released and cleared callbacks for |apiDevice|
-        DefaultApiDeviceWasReleased();
+    // Signal that we already released and cleared callbacks for |apiDevice|
+    DefaultApiDeviceWasReleased();
 
-        // Using the command encoder should be an error.
-        wgpuCommandEncoderFinish(encoder, nullptr);
-        FlushClient(false);
-    }
+    // Using the command encoder should be an error.
+    wgpuCommandEncoderFinish(encoder, nullptr);
+    FlushClient(false);
+}
 
 }  // namespace dawn::wire
diff --git a/src/dawn/tests/unittests/wire/WireDisconnectTests.cpp b/src/dawn/tests/unittests/wire/WireDisconnectTests.cpp
index d8f397c..2218194 100644
--- a/src/dawn/tests/unittests/wire/WireDisconnectTests.cpp
+++ b/src/dawn/tests/unittests/wire/WireDisconnectTests.cpp
@@ -20,165 +20,165 @@
 
 namespace dawn::wire {
 
-    using testing::_;
-    using testing::Exactly;
-    using testing::InvokeWithoutArgs;
-    using testing::MockCallback;
-    using testing::Return;
-    using testing::Sequence;
-    using testing::StrEq;
+using testing::_;
+using testing::Exactly;
+using testing::InvokeWithoutArgs;
+using testing::MockCallback;
+using testing::Return;
+using testing::Sequence;
+using testing::StrEq;
 
-    namespace {
+namespace {
 
-        class WireDisconnectTests : public WireTest {};
+class WireDisconnectTests : public WireTest {};
 
-    }  // anonymous namespace
+}  // anonymous namespace
 
-    // Test that commands are not received if the client disconnects.
-    TEST_F(WireDisconnectTests, CommandsAfterDisconnect) {
-        // Check that commands work at all.
-        wgpuDeviceCreateCommandEncoder(device, nullptr);
+// Test that commands are not received if the client disconnects.
+TEST_F(WireDisconnectTests, CommandsAfterDisconnect) {
+    // Check that commands work at all.
+    wgpuDeviceCreateCommandEncoder(device, nullptr);
 
-        WGPUCommandEncoder apiCmdBufEncoder = api.GetNewCommandEncoder();
-        EXPECT_CALL(api, DeviceCreateCommandEncoder(apiDevice, nullptr))
-            .WillOnce(Return(apiCmdBufEncoder));
-        FlushClient();
+    WGPUCommandEncoder apiCmdBufEncoder = api.GetNewCommandEncoder();
+    EXPECT_CALL(api, DeviceCreateCommandEncoder(apiDevice, nullptr))
+        .WillOnce(Return(apiCmdBufEncoder));
+    FlushClient();
 
-        // Disconnect.
-        GetWireClient()->Disconnect();
+    // Disconnect.
+    GetWireClient()->Disconnect();
 
-        // Command is not received because client disconnected.
-        wgpuDeviceCreateCommandEncoder(device, nullptr);
-        EXPECT_CALL(api, DeviceCreateCommandEncoder(_, _)).Times(Exactly(0));
-        FlushClient();
-    }
+    // Command is not received because client disconnected.
+    wgpuDeviceCreateCommandEncoder(device, nullptr);
+    EXPECT_CALL(api, DeviceCreateCommandEncoder(_, _)).Times(Exactly(0));
+    FlushClient();
+}
 
-    // Test that commands that are serialized before a disconnect but flushed
-    // after are received.
-    TEST_F(WireDisconnectTests, FlushAfterDisconnect) {
-        // Check that commands work at all.
-        wgpuDeviceCreateCommandEncoder(device, nullptr);
+// Test that commands that are serialized before a disconnect but flushed
+// after are received.
+TEST_F(WireDisconnectTests, FlushAfterDisconnect) {
+    // Check that commands work at all.
+    wgpuDeviceCreateCommandEncoder(device, nullptr);
 
-        // Disconnect.
-        GetWireClient()->Disconnect();
+    // Disconnect.
+    GetWireClient()->Disconnect();
 
-        // Already-serialized commmands are still received.
-        WGPUCommandEncoder apiCmdBufEncoder = api.GetNewCommandEncoder();
-        EXPECT_CALL(api, DeviceCreateCommandEncoder(apiDevice, nullptr))
-            .WillOnce(Return(apiCmdBufEncoder));
-        FlushClient();
-    }
+    // Already-serialized commands are still received.
+    WGPUCommandEncoder apiCmdBufEncoder = api.GetNewCommandEncoder();
+    EXPECT_CALL(api, DeviceCreateCommandEncoder(apiDevice, nullptr))
+        .WillOnce(Return(apiCmdBufEncoder));
+    FlushClient();
+}
 
-    // Check that disconnecting the wire client calls the device lost callback exacty once.
-    TEST_F(WireDisconnectTests, CallsDeviceLostCallback) {
-        MockCallback<WGPUDeviceLostCallback> mockDeviceLostCallback;
-        wgpuDeviceSetDeviceLostCallback(device, mockDeviceLostCallback.Callback(),
-                                        mockDeviceLostCallback.MakeUserdata(this));
+// Check that disconnecting the wire client calls the device lost callback exactly once.
+TEST_F(WireDisconnectTests, CallsDeviceLostCallback) {
+    MockCallback<WGPUDeviceLostCallback> mockDeviceLostCallback;
+    wgpuDeviceSetDeviceLostCallback(device, mockDeviceLostCallback.Callback(),
+                                    mockDeviceLostCallback.MakeUserdata(this));
 
-        // Disconnect the wire client. We should receive device lost only once.
-        EXPECT_CALL(mockDeviceLostCallback, Call(WGPUDeviceLostReason_Undefined, _, this))
-            .Times(Exactly(1));
-        GetWireClient()->Disconnect();
-        GetWireClient()->Disconnect();
-    }
+    // Disconnect the wire client. We should receive device lost only once.
+    EXPECT_CALL(mockDeviceLostCallback, Call(WGPUDeviceLostReason_Undefined, _, this))
+        .Times(Exactly(1));
+    GetWireClient()->Disconnect();
+    GetWireClient()->Disconnect();
+}
 
-    // Check that disconnecting the wire client after a device loss does not trigger the callback
-    // again.
-    TEST_F(WireDisconnectTests, ServerLostThenDisconnect) {
-        MockCallback<WGPUDeviceLostCallback> mockDeviceLostCallback;
-        wgpuDeviceSetDeviceLostCallback(device, mockDeviceLostCallback.Callback(),
-                                        mockDeviceLostCallback.MakeUserdata(this));
+// Check that disconnecting the wire client after a device loss does not trigger the callback
+// again.
+TEST_F(WireDisconnectTests, ServerLostThenDisconnect) {
+    MockCallback<WGPUDeviceLostCallback> mockDeviceLostCallback;
+    wgpuDeviceSetDeviceLostCallback(device, mockDeviceLostCallback.Callback(),
+                                    mockDeviceLostCallback.MakeUserdata(this));
 
-        api.CallDeviceSetDeviceLostCallbackCallback(apiDevice, WGPUDeviceLostReason_Undefined,
-                                                    "some reason");
+    api.CallDeviceSetDeviceLostCallbackCallback(apiDevice, WGPUDeviceLostReason_Undefined,
+                                                "some reason");
 
-        // Flush the device lost return command.
-        EXPECT_CALL(mockDeviceLostCallback,
-                    Call(WGPUDeviceLostReason_Undefined, StrEq("some reason"), this))
-            .Times(Exactly(1));
-        FlushServer();
+    // Flush the device lost return command.
+    EXPECT_CALL(mockDeviceLostCallback,
+                Call(WGPUDeviceLostReason_Undefined, StrEq("some reason"), this))
+        .Times(Exactly(1));
+    FlushServer();
 
-        // Disconnect the client. We shouldn't see the lost callback again.
-        EXPECT_CALL(mockDeviceLostCallback, Call(_, _, _)).Times(Exactly(0));
-        GetWireClient()->Disconnect();
-    }
+    // Disconnect the client. We shouldn't see the lost callback again.
+    EXPECT_CALL(mockDeviceLostCallback, Call(_, _, _)).Times(Exactly(0));
+    GetWireClient()->Disconnect();
+}
 
-    // Check that disconnecting the wire client inside the device loss callback does not trigger the
-    // callback again.
-    TEST_F(WireDisconnectTests, ServerLostThenDisconnectInCallback) {
-        MockCallback<WGPUDeviceLostCallback> mockDeviceLostCallback;
-        wgpuDeviceSetDeviceLostCallback(device, mockDeviceLostCallback.Callback(),
-                                        mockDeviceLostCallback.MakeUserdata(this));
+// Check that disconnecting the wire client inside the device loss callback does not trigger the
+// callback again.
+TEST_F(WireDisconnectTests, ServerLostThenDisconnectInCallback) {
+    MockCallback<WGPUDeviceLostCallback> mockDeviceLostCallback;
+    wgpuDeviceSetDeviceLostCallback(device, mockDeviceLostCallback.Callback(),
+                                    mockDeviceLostCallback.MakeUserdata(this));
 
-        api.CallDeviceSetDeviceLostCallbackCallback(apiDevice, WGPUDeviceLostReason_Undefined,
-                                                    "lost reason");
+    api.CallDeviceSetDeviceLostCallbackCallback(apiDevice, WGPUDeviceLostReason_Undefined,
+                                                "lost reason");
 
-        // Disconnect the client inside the lost callback. We should see the callback
-        // only once.
-        EXPECT_CALL(mockDeviceLostCallback,
-                    Call(WGPUDeviceLostReason_Undefined, StrEq("lost reason"), this))
-            .WillOnce(InvokeWithoutArgs([&]() {
-                EXPECT_CALL(mockDeviceLostCallback, Call(_, _, _)).Times(Exactly(0));
-                GetWireClient()->Disconnect();
-            }));
-        FlushServer();
-    }
+    // Disconnect the client inside the lost callback. We should see the callback
+    // only once.
+    EXPECT_CALL(mockDeviceLostCallback,
+                Call(WGPUDeviceLostReason_Undefined, StrEq("lost reason"), this))
+        .WillOnce(InvokeWithoutArgs([&]() {
+            EXPECT_CALL(mockDeviceLostCallback, Call(_, _, _)).Times(Exactly(0));
+            GetWireClient()->Disconnect();
+        }));
+    FlushServer();
+}
 
-    // Check that a device loss after a disconnect does not trigger the callback again.
-    TEST_F(WireDisconnectTests, DisconnectThenServerLost) {
-        MockCallback<WGPUDeviceLostCallback> mockDeviceLostCallback;
-        wgpuDeviceSetDeviceLostCallback(device, mockDeviceLostCallback.Callback(),
-                                        mockDeviceLostCallback.MakeUserdata(this));
+// Check that a device loss after a disconnect does not trigger the callback again.
+TEST_F(WireDisconnectTests, DisconnectThenServerLost) {
+    MockCallback<WGPUDeviceLostCallback> mockDeviceLostCallback;
+    wgpuDeviceSetDeviceLostCallback(device, mockDeviceLostCallback.Callback(),
+                                    mockDeviceLostCallback.MakeUserdata(this));
 
-        // Disconnect the client. We should see the callback once.
-        EXPECT_CALL(mockDeviceLostCallback, Call(WGPUDeviceLostReason_Undefined, _, this))
-            .Times(Exactly(1));
-        GetWireClient()->Disconnect();
+    // Disconnect the client. We should see the callback once.
+    EXPECT_CALL(mockDeviceLostCallback, Call(WGPUDeviceLostReason_Undefined, _, this))
+        .Times(Exactly(1));
+    GetWireClient()->Disconnect();
 
-        // Lose the device on the server. The client callback shouldn't be
-        // called again.
-        api.CallDeviceSetDeviceLostCallbackCallback(apiDevice, WGPUDeviceLostReason_Undefined,
-                                                    "lost reason");
-        EXPECT_CALL(mockDeviceLostCallback, Call(_, _, _)).Times(Exactly(0));
-        FlushServer();
-    }
+    // Lose the device on the server. The client callback shouldn't be
+    // called again.
+    api.CallDeviceSetDeviceLostCallbackCallback(apiDevice, WGPUDeviceLostReason_Undefined,
+                                                "lost reason");
+    EXPECT_CALL(mockDeviceLostCallback, Call(_, _, _)).Times(Exactly(0));
+    FlushServer();
+}
 
-    // Test that client objects are all destroyed if the WireClient is destroyed.
-    TEST_F(WireDisconnectTests, DeleteClientDestroysObjects) {
-        WGPUSamplerDescriptor desc = {};
-        wgpuDeviceCreateCommandEncoder(device, nullptr);
-        wgpuDeviceCreateSampler(device, &desc);
+// Test that client objects are all destroyed if the WireClient is destroyed.
+TEST_F(WireDisconnectTests, DeleteClientDestroysObjects) {
+    WGPUSamplerDescriptor desc = {};
+    wgpuDeviceCreateCommandEncoder(device, nullptr);
+    wgpuDeviceCreateSampler(device, &desc);
 
-        WGPUCommandEncoder apiCommandEncoder = api.GetNewCommandEncoder();
-        EXPECT_CALL(api, DeviceCreateCommandEncoder(apiDevice, nullptr))
-            .WillOnce(Return(apiCommandEncoder));
+    WGPUCommandEncoder apiCommandEncoder = api.GetNewCommandEncoder();
+    EXPECT_CALL(api, DeviceCreateCommandEncoder(apiDevice, nullptr))
+        .WillOnce(Return(apiCommandEncoder));
 
-        WGPUSampler apiSampler = api.GetNewSampler();
-        EXPECT_CALL(api, DeviceCreateSampler(apiDevice, _)).WillOnce(Return(apiSampler));
+    WGPUSampler apiSampler = api.GetNewSampler();
+    EXPECT_CALL(api, DeviceCreateSampler(apiDevice, _)).WillOnce(Return(apiSampler));
 
-        FlushClient();
+    FlushClient();
 
-        DeleteClient();
+    DeleteClient();
 
-        // Expect release on all objects created by the client.
-        Sequence s1, s2, s3;
-        EXPECT_CALL(api, QueueRelease(apiQueue)).Times(1).InSequence(s1);
-        EXPECT_CALL(api, CommandEncoderRelease(apiCommandEncoder)).Times(1).InSequence(s2);
-        EXPECT_CALL(api, SamplerRelease(apiSampler)).Times(1).InSequence(s3);
-        EXPECT_CALL(api, OnDeviceSetUncapturedErrorCallback(apiDevice, nullptr, nullptr))
-            .Times(1)
-            .InSequence(s1, s2);
-        EXPECT_CALL(api, OnDeviceSetLoggingCallback(apiDevice, nullptr, nullptr))
-            .Times(1)
-            .InSequence(s1, s2);
-        EXPECT_CALL(api, OnDeviceSetDeviceLostCallback(apiDevice, nullptr, nullptr))
-            .Times(1)
-            .InSequence(s1, s2);
-        EXPECT_CALL(api, DeviceRelease(apiDevice)).Times(1).InSequence(s1, s2, s3);
-        FlushClient();
+    // Expect release on all objects created by the client.
+    Sequence s1, s2, s3;
+    EXPECT_CALL(api, QueueRelease(apiQueue)).Times(1).InSequence(s1);
+    EXPECT_CALL(api, CommandEncoderRelease(apiCommandEncoder)).Times(1).InSequence(s2);
+    EXPECT_CALL(api, SamplerRelease(apiSampler)).Times(1).InSequence(s3);
+    EXPECT_CALL(api, OnDeviceSetUncapturedErrorCallback(apiDevice, nullptr, nullptr))
+        .Times(1)
+        .InSequence(s1, s2);
+    EXPECT_CALL(api, OnDeviceSetLoggingCallback(apiDevice, nullptr, nullptr))
+        .Times(1)
+        .InSequence(s1, s2);
+    EXPECT_CALL(api, OnDeviceSetDeviceLostCallback(apiDevice, nullptr, nullptr))
+        .Times(1)
+        .InSequence(s1, s2);
+    EXPECT_CALL(api, DeviceRelease(apiDevice)).Times(1).InSequence(s1, s2, s3);
+    FlushClient();
 
-        // Signal that we already released and cleared callbacks for |apiDevice|
-        DefaultApiDeviceWasReleased();
-    }
+    // Signal that we already released and cleared callbacks for |apiDevice|
+    DefaultApiDeviceWasReleased();
+}
 
 }  // namespace dawn::wire
diff --git a/src/dawn/tests/unittests/wire/WireErrorCallbackTests.cpp b/src/dawn/tests/unittests/wire/WireErrorCallbackTests.cpp
index 045125d..06a5d88 100644
--- a/src/dawn/tests/unittests/wire/WireErrorCallbackTests.cpp
+++ b/src/dawn/tests/unittests/wire/WireErrorCallbackTests.cpp
@@ -19,304 +19,293 @@
 
 namespace dawn::wire {
 
-    using testing::_;
-    using testing::DoAll;
-    using testing::Mock;
-    using testing::Return;
-    using testing::SaveArg;
-    using testing::StrEq;
-    using testing::StrictMock;
+using testing::_;
+using testing::DoAll;
+using testing::Mock;
+using testing::Return;
+using testing::SaveArg;
+using testing::StrEq;
+using testing::StrictMock;
 
-    namespace {
+namespace {
 
-        // Mock classes to add expectations on the wire calling callbacks
-        class MockDeviceErrorCallback {
-          public:
-            MOCK_METHOD(void, Call, (WGPUErrorType type, const char* message, void* userdata));
-        };
+// Mock classes to add expectations on the wire calling callbacks
+class MockDeviceErrorCallback {
+  public:
+    MOCK_METHOD(void, Call, (WGPUErrorType type, const char* message, void* userdata));
+};
 
-        std::unique_ptr<StrictMock<MockDeviceErrorCallback>> mockDeviceErrorCallback;
-        void ToMockDeviceErrorCallback(WGPUErrorType type, const char* message, void* userdata) {
-            mockDeviceErrorCallback->Call(type, message, userdata);
-        }
+std::unique_ptr<StrictMock<MockDeviceErrorCallback>> mockDeviceErrorCallback;
+void ToMockDeviceErrorCallback(WGPUErrorType type, const char* message, void* userdata) {
+    mockDeviceErrorCallback->Call(type, message, userdata);
+}
 
-        class MockDevicePopErrorScopeCallback {
-          public:
-            MOCK_METHOD(void, Call, (WGPUErrorType type, const char* message, void* userdata));
-        };
+class MockDevicePopErrorScopeCallback {
+  public:
+    MOCK_METHOD(void, Call, (WGPUErrorType type, const char* message, void* userdata));
+};
 
-        std::unique_ptr<StrictMock<MockDevicePopErrorScopeCallback>>
-            mockDevicePopErrorScopeCallback;
-        void ToMockDevicePopErrorScopeCallback(WGPUErrorType type,
-                                               const char* message,
-                                               void* userdata) {
-            mockDevicePopErrorScopeCallback->Call(type, message, userdata);
-        }
+std::unique_ptr<StrictMock<MockDevicePopErrorScopeCallback>> mockDevicePopErrorScopeCallback;
+void ToMockDevicePopErrorScopeCallback(WGPUErrorType type, const char* message, void* userdata) {
+    mockDevicePopErrorScopeCallback->Call(type, message, userdata);
+}
 
-        class MockDeviceLoggingCallback {
-          public:
-            MOCK_METHOD(void, Call, (WGPULoggingType type, const char* message, void* userdata));
-        };
+class MockDeviceLoggingCallback {
+  public:
+    MOCK_METHOD(void, Call, (WGPULoggingType type, const char* message, void* userdata));
+};
 
-        std::unique_ptr<StrictMock<MockDeviceLoggingCallback>> mockDeviceLoggingCallback;
-        void ToMockDeviceLoggingCallback(WGPULoggingType type,
-                                         const char* message,
-                                         void* userdata) {
-            mockDeviceLoggingCallback->Call(type, message, userdata);
-        }
+std::unique_ptr<StrictMock<MockDeviceLoggingCallback>> mockDeviceLoggingCallback;
+void ToMockDeviceLoggingCallback(WGPULoggingType type, const char* message, void* userdata) {
+    mockDeviceLoggingCallback->Call(type, message, userdata);
+}
 
-        class MockDeviceLostCallback {
-          public:
-            MOCK_METHOD(void,
-                        Call,
-                        (WGPUDeviceLostReason reason, const char* message, void* userdata));
-        };
+class MockDeviceLostCallback {
+  public:
+    MOCK_METHOD(void, Call, (WGPUDeviceLostReason reason, const char* message, void* userdata));
+};
 
-        std::unique_ptr<StrictMock<MockDeviceLostCallback>> mockDeviceLostCallback;
-        void ToMockDeviceLostCallback(WGPUDeviceLostReason reason,
-                                      const char* message,
-                                      void* userdata) {
-            mockDeviceLostCallback->Call(reason, message, userdata);
-        }
+std::unique_ptr<StrictMock<MockDeviceLostCallback>> mockDeviceLostCallback;
+void ToMockDeviceLostCallback(WGPUDeviceLostReason reason, const char* message, void* userdata) {
+    mockDeviceLostCallback->Call(reason, message, userdata);
+}
 
-    }  // anonymous namespace
+}  // anonymous namespace
 
-    class WireErrorCallbackTests : public WireTest {
-      public:
-        WireErrorCallbackTests() {
-        }
-        ~WireErrorCallbackTests() override = default;
+class WireErrorCallbackTests : public WireTest {
+  public:
+    WireErrorCallbackTests() {}
+    ~WireErrorCallbackTests() override = default;
 
-        void SetUp() override {
-            WireTest::SetUp();
+    void SetUp() override {
+        WireTest::SetUp();
 
-            mockDeviceErrorCallback = std::make_unique<StrictMock<MockDeviceErrorCallback>>();
-            mockDeviceLoggingCallback = std::make_unique<StrictMock<MockDeviceLoggingCallback>>();
-            mockDevicePopErrorScopeCallback =
-                std::make_unique<StrictMock<MockDevicePopErrorScopeCallback>>();
-            mockDeviceLostCallback = std::make_unique<StrictMock<MockDeviceLostCallback>>();
-        }
-
-        void TearDown() override {
-            WireTest::TearDown();
-
-            mockDeviceErrorCallback = nullptr;
-            mockDeviceLoggingCallback = nullptr;
-            mockDevicePopErrorScopeCallback = nullptr;
-            mockDeviceLostCallback = nullptr;
-        }
-
-        void FlushServer() {
-            WireTest::FlushServer();
-
-            Mock::VerifyAndClearExpectations(&mockDeviceErrorCallback);
-            Mock::VerifyAndClearExpectations(&mockDevicePopErrorScopeCallback);
-        }
-    };
-
-    // Test the return wire for device error callbacks
-    TEST_F(WireErrorCallbackTests, DeviceErrorCallback) {
-        wgpuDeviceSetUncapturedErrorCallback(device, ToMockDeviceErrorCallback, this);
-
-        // Setting the error callback should stay on the client side and do nothing
-        FlushClient();
-
-        // Calling the callback on the server side will result in the callback being called on the
-        // client side
-        api.CallDeviceSetUncapturedErrorCallbackCallback(apiDevice, WGPUErrorType_Validation,
-                                                         "Some error message");
-
-        EXPECT_CALL(*mockDeviceErrorCallback,
-                    Call(WGPUErrorType_Validation, StrEq("Some error message"), this))
-            .Times(1);
-
-        FlushServer();
+        mockDeviceErrorCallback = std::make_unique<StrictMock<MockDeviceErrorCallback>>();
+        mockDeviceLoggingCallback = std::make_unique<StrictMock<MockDeviceLoggingCallback>>();
+        mockDevicePopErrorScopeCallback =
+            std::make_unique<StrictMock<MockDevicePopErrorScopeCallback>>();
+        mockDeviceLostCallback = std::make_unique<StrictMock<MockDeviceLostCallback>>();
     }
 
-    // Test the return wire for device user warning callbacks
-    TEST_F(WireErrorCallbackTests, DeviceLoggingCallback) {
-        wgpuDeviceSetLoggingCallback(device, ToMockDeviceLoggingCallback, this);
+    void TearDown() override {
+        WireTest::TearDown();
 
-        // Setting the injected warning callback should stay on the client side and do nothing
-        FlushClient();
-
-        // Calling the callback on the server side will result in the callback being called on the
-        // client side
-        api.CallDeviceSetLoggingCallbackCallback(apiDevice, WGPULoggingType_Info, "Some message");
-
-        EXPECT_CALL(*mockDeviceLoggingCallback,
-                    Call(WGPULoggingType_Info, StrEq("Some message"), this))
-            .Times(1);
-
-        FlushServer();
+        mockDeviceErrorCallback = nullptr;
+        mockDeviceLoggingCallback = nullptr;
+        mockDevicePopErrorScopeCallback = nullptr;
+        mockDeviceLostCallback = nullptr;
     }
 
-    // Test the return wire for error scopes.
-    TEST_F(WireErrorCallbackTests, PushPopErrorScopeCallback) {
-        EXPECT_CALL(api, DevicePushErrorScope(apiDevice, WGPUErrorFilter_Validation)).Times(1);
+    void FlushServer() {
+        WireTest::FlushServer();
+
+        Mock::VerifyAndClearExpectations(&mockDeviceErrorCallback);
+        Mock::VerifyAndClearExpectations(&mockDevicePopErrorScopeCallback);
+    }
+};
+
+// Test the return wire for device error callbacks
+TEST_F(WireErrorCallbackTests, DeviceErrorCallback) {
+    wgpuDeviceSetUncapturedErrorCallback(device, ToMockDeviceErrorCallback, this);
+
+    // Setting the error callback should stay on the client side and do nothing
+    FlushClient();
+
+    // Calling the callback on the server side will result in the callback being called on the
+    // client side
+    api.CallDeviceSetUncapturedErrorCallbackCallback(apiDevice, WGPUErrorType_Validation,
+                                                     "Some error message");
+
+    EXPECT_CALL(*mockDeviceErrorCallback,
+                Call(WGPUErrorType_Validation, StrEq("Some error message"), this))
+        .Times(1);
+
+    FlushServer();
+}
+
+// Test the return wire for device user warning callbacks
+TEST_F(WireErrorCallbackTests, DeviceLoggingCallback) {
+    wgpuDeviceSetLoggingCallback(device, ToMockDeviceLoggingCallback, this);
+
+    // Setting the injected warning callback should stay on the client side and do nothing
+    FlushClient();
+
+    // Calling the callback on the server side will result in the callback being called on the
+    // client side
+    api.CallDeviceSetLoggingCallbackCallback(apiDevice, WGPULoggingType_Info, "Some message");
+
+    EXPECT_CALL(*mockDeviceLoggingCallback, Call(WGPULoggingType_Info, StrEq("Some message"), this))
+        .Times(1);
+
+    FlushServer();
+}
+
+// Test the return wire for error scopes.
+TEST_F(WireErrorCallbackTests, PushPopErrorScopeCallback) {
+    EXPECT_CALL(api, DevicePushErrorScope(apiDevice, WGPUErrorFilter_Validation)).Times(1);
+    wgpuDevicePushErrorScope(device, WGPUErrorFilter_Validation);
+    FlushClient();
+
+    WGPUErrorCallback callback;
+    void* userdata;
+    EXPECT_CALL(api, OnDevicePopErrorScope(apiDevice, _, _))
+        .WillOnce(DoAll(SaveArg<1>(&callback), SaveArg<2>(&userdata), Return(true)));
+    wgpuDevicePopErrorScope(device, ToMockDevicePopErrorScopeCallback, this);
+    FlushClient();
+
+    EXPECT_CALL(*mockDevicePopErrorScopeCallback,
+                Call(WGPUErrorType_Validation, StrEq("Some error message"), this))
+        .Times(1);
+    callback(WGPUErrorType_Validation, "Some error message", userdata);
+    FlushServer();
+}
+
+// Test the return wire for error scopes when callbacks return in a various orders.
+TEST_F(WireErrorCallbackTests, PopErrorScopeCallbackOrdering) {
+    // Two error scopes are popped, and the first one returns first.
+    {
+        EXPECT_CALL(api, DevicePushErrorScope(apiDevice, WGPUErrorFilter_Validation)).Times(2);
+        wgpuDevicePushErrorScope(device, WGPUErrorFilter_Validation);
         wgpuDevicePushErrorScope(device, WGPUErrorFilter_Validation);
         FlushClient();
 
-        WGPUErrorCallback callback;
-        void* userdata;
+        WGPUErrorCallback callback1;
+        WGPUErrorCallback callback2;
+        void* userdata1;
+        void* userdata2;
         EXPECT_CALL(api, OnDevicePopErrorScope(apiDevice, _, _))
-            .WillOnce(DoAll(SaveArg<1>(&callback), SaveArg<2>(&userdata), Return(true)));
+            .WillOnce(DoAll(SaveArg<1>(&callback1), SaveArg<2>(&userdata1), Return(true)))
+            .WillOnce(DoAll(SaveArg<1>(&callback2), SaveArg<2>(&userdata2), Return(true)));
         wgpuDevicePopErrorScope(device, ToMockDevicePopErrorScopeCallback, this);
+        wgpuDevicePopErrorScope(device, ToMockDevicePopErrorScopeCallback, this + 1);
         FlushClient();
 
         EXPECT_CALL(*mockDevicePopErrorScopeCallback,
-                    Call(WGPUErrorType_Validation, StrEq("Some error message"), this))
+                    Call(WGPUErrorType_Validation, StrEq("First error message"), this))
             .Times(1);
-        callback(WGPUErrorType_Validation, "Some error message", userdata);
+        callback1(WGPUErrorType_Validation, "First error message", userdata1);
+        FlushServer();
+
+        EXPECT_CALL(*mockDevicePopErrorScopeCallback,
+                    Call(WGPUErrorType_Validation, StrEq("Second error message"), this + 1))
+            .Times(1);
+        callback2(WGPUErrorType_Validation, "Second error message", userdata2);
         FlushServer();
     }
 
-    // Test the return wire for error scopes when callbacks return in a various orders.
-    TEST_F(WireErrorCallbackTests, PopErrorScopeCallbackOrdering) {
-        // Two error scopes are popped, and the first one returns first.
-        {
-            EXPECT_CALL(api, DevicePushErrorScope(apiDevice, WGPUErrorFilter_Validation)).Times(2);
-            wgpuDevicePushErrorScope(device, WGPUErrorFilter_Validation);
-            wgpuDevicePushErrorScope(device, WGPUErrorFilter_Validation);
-            FlushClient();
-
-            WGPUErrorCallback callback1;
-            WGPUErrorCallback callback2;
-            void* userdata1;
-            void* userdata2;
-            EXPECT_CALL(api, OnDevicePopErrorScope(apiDevice, _, _))
-                .WillOnce(DoAll(SaveArg<1>(&callback1), SaveArg<2>(&userdata1), Return(true)))
-                .WillOnce(DoAll(SaveArg<1>(&callback2), SaveArg<2>(&userdata2), Return(true)));
-            wgpuDevicePopErrorScope(device, ToMockDevicePopErrorScopeCallback, this);
-            wgpuDevicePopErrorScope(device, ToMockDevicePopErrorScopeCallback, this + 1);
-            FlushClient();
-
-            EXPECT_CALL(*mockDevicePopErrorScopeCallback,
-                        Call(WGPUErrorType_Validation, StrEq("First error message"), this))
-                .Times(1);
-            callback1(WGPUErrorType_Validation, "First error message", userdata1);
-            FlushServer();
-
-            EXPECT_CALL(*mockDevicePopErrorScopeCallback,
-                        Call(WGPUErrorType_Validation, StrEq("Second error message"), this + 1))
-                .Times(1);
-            callback2(WGPUErrorType_Validation, "Second error message", userdata2);
-            FlushServer();
-        }
-
-        // Two error scopes are popped, and the second one returns first.
-        {
-            EXPECT_CALL(api, DevicePushErrorScope(apiDevice, WGPUErrorFilter_Validation)).Times(2);
-            wgpuDevicePushErrorScope(device, WGPUErrorFilter_Validation);
-            wgpuDevicePushErrorScope(device, WGPUErrorFilter_Validation);
-            FlushClient();
-
-            WGPUErrorCallback callback1;
-            WGPUErrorCallback callback2;
-            void* userdata1;
-            void* userdata2;
-            EXPECT_CALL(api, OnDevicePopErrorScope(apiDevice, _, _))
-                .WillOnce(DoAll(SaveArg<1>(&callback1), SaveArg<2>(&userdata1), Return(true)))
-                .WillOnce(DoAll(SaveArg<1>(&callback2), SaveArg<2>(&userdata2), Return(true)));
-            wgpuDevicePopErrorScope(device, ToMockDevicePopErrorScopeCallback, this);
-            wgpuDevicePopErrorScope(device, ToMockDevicePopErrorScopeCallback, this + 1);
-            FlushClient();
-
-            EXPECT_CALL(*mockDevicePopErrorScopeCallback,
-                        Call(WGPUErrorType_Validation, StrEq("Second error message"), this + 1))
-                .Times(1);
-            callback2(WGPUErrorType_Validation, "Second error message", userdata2);
-            FlushServer();
-
-            EXPECT_CALL(*mockDevicePopErrorScopeCallback,
-                        Call(WGPUErrorType_Validation, StrEq("First error message"), this))
-                .Times(1);
-            callback1(WGPUErrorType_Validation, "First error message", userdata1);
-            FlushServer();
-        }
-    }
-
-    // Test the return wire for error scopes in flight when the device is destroyed.
-    TEST_F(WireErrorCallbackTests, PopErrorScopeDeviceInFlightDestroy) {
-        EXPECT_CALL(api, DevicePushErrorScope(apiDevice, WGPUErrorFilter_Validation)).Times(1);
+    // Two error scopes are popped, and the second one returns first.
+    {
+        EXPECT_CALL(api, DevicePushErrorScope(apiDevice, WGPUErrorFilter_Validation)).Times(2);
+        wgpuDevicePushErrorScope(device, WGPUErrorFilter_Validation);
         wgpuDevicePushErrorScope(device, WGPUErrorFilter_Validation);
         FlushClient();
 
-        EXPECT_CALL(api, OnDevicePopErrorScope(apiDevice, _, _)).WillOnce(Return(true));
-        wgpuDevicePopErrorScope(device, ToMockDevicePopErrorScopeCallback, this);
-        FlushClient();
-
-        // Incomplete callback called in Device destructor. This is resolved after the end of this
-        // test.
-        EXPECT_CALL(*mockDevicePopErrorScopeCallback,
-                    Call(WGPUErrorType_Unknown, ValidStringMessage(), this))
-            .Times(1);
-    }
-
-    // Test that registering a callback then wire disconnect calls the callback with
-    // DeviceLost.
-    TEST_F(WireErrorCallbackTests, PopErrorScopeThenDisconnect) {
-        EXPECT_CALL(api, DevicePushErrorScope(apiDevice, WGPUErrorFilter_Validation)).Times(1);
-        wgpuDevicePushErrorScope(device, WGPUErrorFilter_Validation);
-
-        EXPECT_CALL(api, OnDevicePopErrorScope(apiDevice, _, _)).WillOnce(Return(true));
-        wgpuDevicePopErrorScope(device, ToMockDevicePopErrorScopeCallback, this);
-        FlushClient();
-
-        EXPECT_CALL(*mockDevicePopErrorScopeCallback,
-                    Call(WGPUErrorType_DeviceLost, ValidStringMessage(), this))
-            .Times(1);
-        GetWireClient()->Disconnect();
-    }
-
-    // Test that registering a callback after wire disconnect calls the callback with
-    // DeviceLost.
-    TEST_F(WireErrorCallbackTests, PopErrorScopeAfterDisconnect) {
-        EXPECT_CALL(api, DevicePushErrorScope(apiDevice, WGPUErrorFilter_Validation)).Times(1);
-        wgpuDevicePushErrorScope(device, WGPUErrorFilter_Validation);
-        FlushClient();
-
-        GetWireClient()->Disconnect();
-
-        EXPECT_CALL(*mockDevicePopErrorScopeCallback,
-                    Call(WGPUErrorType_DeviceLost, ValidStringMessage(), this))
-            .Times(1);
-        wgpuDevicePopErrorScope(device, ToMockDevicePopErrorScopeCallback, this);
-    }
-
-    // Empty stack (We are emulating the errors that would be callback-ed from native).
-    TEST_F(WireErrorCallbackTests, PopErrorScopeEmptyStack) {
-        WGPUErrorCallback callback;
-        void* userdata;
+        WGPUErrorCallback callback1;
+        WGPUErrorCallback callback2;
+        void* userdata1;
+        void* userdata2;
         EXPECT_CALL(api, OnDevicePopErrorScope(apiDevice, _, _))
-            .WillOnce(DoAll(SaveArg<1>(&callback), SaveArg<2>(&userdata), Return(true)));
+            .WillOnce(DoAll(SaveArg<1>(&callback1), SaveArg<2>(&userdata1), Return(true)))
+            .WillOnce(DoAll(SaveArg<1>(&callback2), SaveArg<2>(&userdata2), Return(true)));
         wgpuDevicePopErrorScope(device, ToMockDevicePopErrorScopeCallback, this);
+        wgpuDevicePopErrorScope(device, ToMockDevicePopErrorScopeCallback, this + 1);
         FlushClient();
 
         EXPECT_CALL(*mockDevicePopErrorScopeCallback,
-                    Call(WGPUErrorType_Validation, StrEq("No error scopes to pop"), this))
+                    Call(WGPUErrorType_Validation, StrEq("Second error message"), this + 1))
             .Times(1);
-        callback(WGPUErrorType_Validation, "No error scopes to pop", userdata);
+        callback2(WGPUErrorType_Validation, "Second error message", userdata2);
+        FlushServer();
+
+        EXPECT_CALL(*mockDevicePopErrorScopeCallback,
+                    Call(WGPUErrorType_Validation, StrEq("First error message"), this))
+            .Times(1);
+        callback1(WGPUErrorType_Validation, "First error message", userdata1);
         FlushServer();
     }
+}
 
-    // Test the return wire for device lost callback
-    TEST_F(WireErrorCallbackTests, DeviceLostCallback) {
-        wgpuDeviceSetDeviceLostCallback(device, ToMockDeviceLostCallback, this);
+// Test the return wire for error scopes in flight when the device is destroyed.
+TEST_F(WireErrorCallbackTests, PopErrorScopeDeviceInFlightDestroy) {
+    EXPECT_CALL(api, DevicePushErrorScope(apiDevice, WGPUErrorFilter_Validation)).Times(1);
+    wgpuDevicePushErrorScope(device, WGPUErrorFilter_Validation);
+    FlushClient();
 
-        // Setting the error callback should stay on the client side and do nothing
-        FlushClient();
+    EXPECT_CALL(api, OnDevicePopErrorScope(apiDevice, _, _)).WillOnce(Return(true));
+    wgpuDevicePopErrorScope(device, ToMockDevicePopErrorScopeCallback, this);
+    FlushClient();
 
-        // Calling the callback on the server side will result in the callback being called on the
-        // client side
-        api.CallDeviceSetDeviceLostCallbackCallback(apiDevice, WGPUDeviceLostReason_Undefined,
-                                                    "Some error message");
+    // Incomplete callback called in Device destructor. This is resolved after the end of this
+    // test.
+    EXPECT_CALL(*mockDevicePopErrorScopeCallback,
+                Call(WGPUErrorType_Unknown, ValidStringMessage(), this))
+        .Times(1);
+}
 
-        EXPECT_CALL(*mockDeviceLostCallback,
-                    Call(WGPUDeviceLostReason_Undefined, StrEq("Some error message"), this))
-            .Times(1);
+// Test that registering a callback then wire disconnect calls the callback with
+// DeviceLost.
+TEST_F(WireErrorCallbackTests, PopErrorScopeThenDisconnect) {
+    EXPECT_CALL(api, DevicePushErrorScope(apiDevice, WGPUErrorFilter_Validation)).Times(1);
+    wgpuDevicePushErrorScope(device, WGPUErrorFilter_Validation);
 
-        FlushServer();
-    }
+    EXPECT_CALL(api, OnDevicePopErrorScope(apiDevice, _, _)).WillOnce(Return(true));
+    wgpuDevicePopErrorScope(device, ToMockDevicePopErrorScopeCallback, this);
+    FlushClient();
+
+    EXPECT_CALL(*mockDevicePopErrorScopeCallback,
+                Call(WGPUErrorType_DeviceLost, ValidStringMessage(), this))
+        .Times(1);
+    GetWireClient()->Disconnect();
+}
+
+// Test that registering a callback after wire disconnect calls the callback with
+// DeviceLost.
+TEST_F(WireErrorCallbackTests, PopErrorScopeAfterDisconnect) {
+    EXPECT_CALL(api, DevicePushErrorScope(apiDevice, WGPUErrorFilter_Validation)).Times(1);
+    wgpuDevicePushErrorScope(device, WGPUErrorFilter_Validation);
+    FlushClient();
+
+    GetWireClient()->Disconnect();
+
+    EXPECT_CALL(*mockDevicePopErrorScopeCallback,
+                Call(WGPUErrorType_DeviceLost, ValidStringMessage(), this))
+        .Times(1);
+    wgpuDevicePopErrorScope(device, ToMockDevicePopErrorScopeCallback, this);
+}
+
+// Empty stack (We are emulating the errors that would be callback-ed from native).
+TEST_F(WireErrorCallbackTests, PopErrorScopeEmptyStack) {
+    WGPUErrorCallback callback;
+    void* userdata;
+    EXPECT_CALL(api, OnDevicePopErrorScope(apiDevice, _, _))
+        .WillOnce(DoAll(SaveArg<1>(&callback), SaveArg<2>(&userdata), Return(true)));
+    wgpuDevicePopErrorScope(device, ToMockDevicePopErrorScopeCallback, this);
+    FlushClient();
+
+    EXPECT_CALL(*mockDevicePopErrorScopeCallback,
+                Call(WGPUErrorType_Validation, StrEq("No error scopes to pop"), this))
+        .Times(1);
+    callback(WGPUErrorType_Validation, "No error scopes to pop", userdata);
+    FlushServer();
+}
+
+// Test the return wire for device lost callback
+TEST_F(WireErrorCallbackTests, DeviceLostCallback) {
+    wgpuDeviceSetDeviceLostCallback(device, ToMockDeviceLostCallback, this);
+
+    // Setting the error callback should stay on the client side and do nothing
+    FlushClient();
+
+    // Calling the callback on the server side will result in the callback being called on the
+    // client side
+    api.CallDeviceSetDeviceLostCallbackCallback(apiDevice, WGPUDeviceLostReason_Undefined,
+                                                "Some error message");
+
+    EXPECT_CALL(*mockDeviceLostCallback,
+                Call(WGPUDeviceLostReason_Undefined, StrEq("Some error message"), this))
+        .Times(1);
+
+    FlushServer();
+}
 
 }  // namespace dawn::wire
diff --git a/src/dawn/tests/unittests/wire/WireExtensionTests.cpp b/src/dawn/tests/unittests/wire/WireExtensionTests.cpp
index 63a78c4..6060ce5 100644
--- a/src/dawn/tests/unittests/wire/WireExtensionTests.cpp
+++ b/src/dawn/tests/unittests/wire/WireExtensionTests.cpp
@@ -16,79 +16,78 @@
 
 namespace dawn::wire {
 
-    using testing::_;
-    using testing::Invoke;
-    using testing::NotNull;
-    using testing::Return;
-    using testing::Unused;
+using testing::_;
+using testing::Invoke;
+using testing::NotNull;
+using testing::Return;
+using testing::Unused;
 
-    class WireExtensionTests : public WireTest {
-      public:
-        WireExtensionTests() {
-        }
-        ~WireExtensionTests() override = default;
-    };
+class WireExtensionTests : public WireTest {
+  public:
+    WireExtensionTests() {}
+    ~WireExtensionTests() override = default;
+};
 
-    // Serialize/Deserializes a chained struct correctly.
-    TEST_F(WireExtensionTests, ChainedStruct) {
-        WGPUShaderModuleDescriptor shaderModuleDesc = {};
-        WGPUShaderModule apiShaderModule = api.GetNewShaderModule();
-        WGPUShaderModule shaderModule = wgpuDeviceCreateShaderModule(device, &shaderModuleDesc);
-        EXPECT_CALL(api, DeviceCreateShaderModule(apiDevice, _)).WillOnce(Return(apiShaderModule));
-        FlushClient();
+// Serialize/Deserializes a chained struct correctly.
+TEST_F(WireExtensionTests, ChainedStruct) {
+    WGPUShaderModuleDescriptor shaderModuleDesc = {};
+    WGPUShaderModule apiShaderModule = api.GetNewShaderModule();
+    WGPUShaderModule shaderModule = wgpuDeviceCreateShaderModule(device, &shaderModuleDesc);
+    EXPECT_CALL(api, DeviceCreateShaderModule(apiDevice, _)).WillOnce(Return(apiShaderModule));
+    FlushClient();
 
-        WGPUPrimitiveDepthClampingState clientExt = {};
-        clientExt.chain.sType = WGPUSType_PrimitiveDepthClampingState;
-        clientExt.chain.next = nullptr;
-        clientExt.clampDepth = true;
+    WGPUPrimitiveDepthClampingState clientExt = {};
+    clientExt.chain.sType = WGPUSType_PrimitiveDepthClampingState;
+    clientExt.chain.next = nullptr;
+    clientExt.clampDepth = true;
 
-        WGPURenderPipelineDescriptor renderPipelineDesc = {};
-        renderPipelineDesc.vertex.module = shaderModule;
-        renderPipelineDesc.vertex.entryPoint = "main";
-        renderPipelineDesc.primitive.nextInChain = &clientExt.chain;
+    WGPURenderPipelineDescriptor renderPipelineDesc = {};
+    renderPipelineDesc.vertex.module = shaderModule;
+    renderPipelineDesc.vertex.entryPoint = "main";
+    renderPipelineDesc.primitive.nextInChain = &clientExt.chain;
 
-        wgpuDeviceCreateRenderPipeline(device, &renderPipelineDesc);
-        EXPECT_CALL(api, DeviceCreateRenderPipeline(apiDevice, NotNull()))
-            .WillOnce(Invoke(
-                [&](Unused, const WGPURenderPipelineDescriptor* serverDesc) -> WGPURenderPipeline {
-                    const auto* ext = reinterpret_cast<const WGPUPrimitiveDepthClampingState*>(
-                        serverDesc->primitive.nextInChain);
-                    EXPECT_EQ(ext->chain.sType, clientExt.chain.sType);
-                    EXPECT_EQ(ext->clampDepth, true);
-                    EXPECT_EQ(ext->chain.next, nullptr);
+    wgpuDeviceCreateRenderPipeline(device, &renderPipelineDesc);
+    EXPECT_CALL(api, DeviceCreateRenderPipeline(apiDevice, NotNull()))
+        .WillOnce(Invoke(
+            [&](Unused, const WGPURenderPipelineDescriptor* serverDesc) -> WGPURenderPipeline {
+                const auto* ext = reinterpret_cast<const WGPUPrimitiveDepthClampingState*>(
+                    serverDesc->primitive.nextInChain);
+                EXPECT_EQ(ext->chain.sType, clientExt.chain.sType);
+                EXPECT_EQ(ext->clampDepth, true);
+                EXPECT_EQ(ext->chain.next, nullptr);
 
-                    return api.GetNewRenderPipeline();
-                }));
-        FlushClient();
-    }
+                return api.GetNewRenderPipeline();
+            }));
+    FlushClient();
+}
 
-    // Serialize/Deserializes multiple chained structs correctly.
-    TEST_F(WireExtensionTests, MutlipleChainedStructs) {
-        WGPUShaderModuleDescriptor shaderModuleDesc = {};
-        WGPUShaderModule apiShaderModule = api.GetNewShaderModule();
-        WGPUShaderModule shaderModule = wgpuDeviceCreateShaderModule(device, &shaderModuleDesc);
-        EXPECT_CALL(api, DeviceCreateShaderModule(apiDevice, _)).WillOnce(Return(apiShaderModule));
-        FlushClient();
+// Serialize/Deserializes multiple chained structs correctly.
+TEST_F(WireExtensionTests, MutlipleChainedStructs) {
+    WGPUShaderModuleDescriptor shaderModuleDesc = {};
+    WGPUShaderModule apiShaderModule = api.GetNewShaderModule();
+    WGPUShaderModule shaderModule = wgpuDeviceCreateShaderModule(device, &shaderModuleDesc);
+    EXPECT_CALL(api, DeviceCreateShaderModule(apiDevice, _)).WillOnce(Return(apiShaderModule));
+    FlushClient();
 
-        WGPUPrimitiveDepthClampingState clientExt2 = {};
-        clientExt2.chain.sType = WGPUSType_PrimitiveDepthClampingState;
-        clientExt2.chain.next = nullptr;
-        clientExt2.clampDepth = false;
+    WGPUPrimitiveDepthClampingState clientExt2 = {};
+    clientExt2.chain.sType = WGPUSType_PrimitiveDepthClampingState;
+    clientExt2.chain.next = nullptr;
+    clientExt2.clampDepth = false;
 
-        WGPUPrimitiveDepthClampingState clientExt1 = {};
-        clientExt1.chain.sType = WGPUSType_PrimitiveDepthClampingState;
-        clientExt1.chain.next = &clientExt2.chain;
-        clientExt1.clampDepth = true;
+    WGPUPrimitiveDepthClampingState clientExt1 = {};
+    clientExt1.chain.sType = WGPUSType_PrimitiveDepthClampingState;
+    clientExt1.chain.next = &clientExt2.chain;
+    clientExt1.clampDepth = true;
 
-        WGPURenderPipelineDescriptor renderPipelineDesc = {};
-        renderPipelineDesc.vertex.module = shaderModule;
-        renderPipelineDesc.vertex.entryPoint = "main";
-        renderPipelineDesc.primitive.nextInChain = &clientExt1.chain;
+    WGPURenderPipelineDescriptor renderPipelineDesc = {};
+    renderPipelineDesc.vertex.module = shaderModule;
+    renderPipelineDesc.vertex.entryPoint = "main";
+    renderPipelineDesc.primitive.nextInChain = &clientExt1.chain;
 
-        wgpuDeviceCreateRenderPipeline(device, &renderPipelineDesc);
-        EXPECT_CALL(api, DeviceCreateRenderPipeline(apiDevice, NotNull()))
-            .WillOnce(Invoke([&](Unused, const WGPURenderPipelineDescriptor* serverDesc)
-                                 -> WGPURenderPipeline {
+    wgpuDeviceCreateRenderPipeline(device, &renderPipelineDesc);
+    EXPECT_CALL(api, DeviceCreateRenderPipeline(apiDevice, NotNull()))
+        .WillOnce(Invoke(
+            [&](Unused, const WGPURenderPipelineDescriptor* serverDesc) -> WGPURenderPipeline {
                 const auto* ext1 = reinterpret_cast<const WGPUPrimitiveDepthClampingState*>(
                     serverDesc->primitive.nextInChain);
                 EXPECT_EQ(ext1->chain.sType, clientExt1.chain.sType);
@@ -102,17 +101,17 @@
 
                 return api.GetNewRenderPipeline();
             }));
-        FlushClient();
+    FlushClient();
 
-        // Swap the order of the chained structs.
-        renderPipelineDesc.primitive.nextInChain = &clientExt2.chain;
-        clientExt2.chain.next = &clientExt1.chain;
-        clientExt1.chain.next = nullptr;
+    // Swap the order of the chained structs.
+    renderPipelineDesc.primitive.nextInChain = &clientExt2.chain;
+    clientExt2.chain.next = &clientExt1.chain;
+    clientExt1.chain.next = nullptr;
 
-        wgpuDeviceCreateRenderPipeline(device, &renderPipelineDesc);
-        EXPECT_CALL(api, DeviceCreateRenderPipeline(apiDevice, NotNull()))
-            .WillOnce(Invoke([&](Unused, const WGPURenderPipelineDescriptor* serverDesc)
-                                 -> WGPURenderPipeline {
+    wgpuDeviceCreateRenderPipeline(device, &renderPipelineDesc);
+    EXPECT_CALL(api, DeviceCreateRenderPipeline(apiDevice, NotNull()))
+        .WillOnce(Invoke(
+            [&](Unused, const WGPURenderPipelineDescriptor* serverDesc) -> WGPURenderPipeline {
                 const auto* ext2 = reinterpret_cast<const WGPUPrimitiveDepthClampingState*>(
                     serverDesc->primitive.nextInChain);
                 EXPECT_EQ(ext2->chain.sType, clientExt2.chain.sType);
@@ -126,123 +125,123 @@
 
                 return api.GetNewRenderPipeline();
             }));
-        FlushClient();
-    }
+    FlushClient();
+}
 
-    // Test that a chained struct with Invalid sType passes through as Invalid.
-    TEST_F(WireExtensionTests, InvalidSType) {
-        WGPUShaderModuleDescriptor shaderModuleDesc = {};
-        WGPUShaderModule apiShaderModule = api.GetNewShaderModule();
-        WGPUShaderModule shaderModule = wgpuDeviceCreateShaderModule(device, &shaderModuleDesc);
-        EXPECT_CALL(api, DeviceCreateShaderModule(apiDevice, _)).WillOnce(Return(apiShaderModule));
-        FlushClient();
+// Test that a chained struct with Invalid sType passes through as Invalid.
+TEST_F(WireExtensionTests, InvalidSType) {
+    WGPUShaderModuleDescriptor shaderModuleDesc = {};
+    WGPUShaderModule apiShaderModule = api.GetNewShaderModule();
+    WGPUShaderModule shaderModule = wgpuDeviceCreateShaderModule(device, &shaderModuleDesc);
+    EXPECT_CALL(api, DeviceCreateShaderModule(apiDevice, _)).WillOnce(Return(apiShaderModule));
+    FlushClient();
 
-        WGPUPrimitiveDepthClampingState clientExt = {};
-        clientExt.chain.sType = WGPUSType_Invalid;
-        clientExt.chain.next = nullptr;
+    WGPUPrimitiveDepthClampingState clientExt = {};
+    clientExt.chain.sType = WGPUSType_Invalid;
+    clientExt.chain.next = nullptr;
 
-        WGPURenderPipelineDescriptor renderPipelineDesc = {};
-        renderPipelineDesc.vertex.module = shaderModule;
-        renderPipelineDesc.vertex.entryPoint = "main";
-        renderPipelineDesc.primitive.nextInChain = &clientExt.chain;
+    WGPURenderPipelineDescriptor renderPipelineDesc = {};
+    renderPipelineDesc.vertex.module = shaderModule;
+    renderPipelineDesc.vertex.entryPoint = "main";
+    renderPipelineDesc.primitive.nextInChain = &clientExt.chain;
 
-        wgpuDeviceCreateRenderPipeline(device, &renderPipelineDesc);
-        EXPECT_CALL(api, DeviceCreateRenderPipeline(apiDevice, NotNull()))
-            .WillOnce(Invoke(
-                [&](Unused, const WGPURenderPipelineDescriptor* serverDesc) -> WGPURenderPipeline {
-                    EXPECT_EQ(serverDesc->primitive.nextInChain->sType, WGPUSType_Invalid);
-                    EXPECT_EQ(serverDesc->primitive.nextInChain->next, nullptr);
-                    return api.GetNewRenderPipeline();
-                }));
-        FlushClient();
-    }
+    wgpuDeviceCreateRenderPipeline(device, &renderPipelineDesc);
+    EXPECT_CALL(api, DeviceCreateRenderPipeline(apiDevice, NotNull()))
+        .WillOnce(Invoke(
+            [&](Unused, const WGPURenderPipelineDescriptor* serverDesc) -> WGPURenderPipeline {
+                EXPECT_EQ(serverDesc->primitive.nextInChain->sType, WGPUSType_Invalid);
+                EXPECT_EQ(serverDesc->primitive.nextInChain->next, nullptr);
+                return api.GetNewRenderPipeline();
+            }));
+    FlushClient();
+}
 
-    // Test that a chained struct with unknown sType passes through as Invalid.
-    TEST_F(WireExtensionTests, UnknownSType) {
-        WGPUShaderModuleDescriptor shaderModuleDesc = {};
-        WGPUShaderModule apiShaderModule = api.GetNewShaderModule();
-        WGPUShaderModule shaderModule = wgpuDeviceCreateShaderModule(device, &shaderModuleDesc);
-        EXPECT_CALL(api, DeviceCreateShaderModule(apiDevice, _)).WillOnce(Return(apiShaderModule));
-        FlushClient();
+// Test that a chained struct with unknown sType passes through as Invalid.
+TEST_F(WireExtensionTests, UnknownSType) {
+    WGPUShaderModuleDescriptor shaderModuleDesc = {};
+    WGPUShaderModule apiShaderModule = api.GetNewShaderModule();
+    WGPUShaderModule shaderModule = wgpuDeviceCreateShaderModule(device, &shaderModuleDesc);
+    EXPECT_CALL(api, DeviceCreateShaderModule(apiDevice, _)).WillOnce(Return(apiShaderModule));
+    FlushClient();
 
-        WGPUPrimitiveDepthClampingState clientExt = {};
-        clientExt.chain.sType = static_cast<WGPUSType>(-1);
-        clientExt.chain.next = nullptr;
+    WGPUPrimitiveDepthClampingState clientExt = {};
+    clientExt.chain.sType = static_cast<WGPUSType>(-1);
+    clientExt.chain.next = nullptr;
 
-        WGPURenderPipelineDescriptor renderPipelineDesc = {};
-        renderPipelineDesc.vertex.module = shaderModule;
-        renderPipelineDesc.vertex.entryPoint = "main";
-        renderPipelineDesc.primitive.nextInChain = &clientExt.chain;
+    WGPURenderPipelineDescriptor renderPipelineDesc = {};
+    renderPipelineDesc.vertex.module = shaderModule;
+    renderPipelineDesc.vertex.entryPoint = "main";
+    renderPipelineDesc.primitive.nextInChain = &clientExt.chain;
 
-        wgpuDeviceCreateRenderPipeline(device, &renderPipelineDesc);
-        EXPECT_CALL(api, DeviceCreateRenderPipeline(apiDevice, NotNull()))
-            .WillOnce(Invoke(
-                [&](Unused, const WGPURenderPipelineDescriptor* serverDesc) -> WGPURenderPipeline {
-                    EXPECT_EQ(serverDesc->primitive.nextInChain->sType, WGPUSType_Invalid);
-                    EXPECT_EQ(serverDesc->primitive.nextInChain->next, nullptr);
-                    return api.GetNewRenderPipeline();
-                }));
-        FlushClient();
-    }
+    wgpuDeviceCreateRenderPipeline(device, &renderPipelineDesc);
+    EXPECT_CALL(api, DeviceCreateRenderPipeline(apiDevice, NotNull()))
+        .WillOnce(Invoke(
+            [&](Unused, const WGPURenderPipelineDescriptor* serverDesc) -> WGPURenderPipeline {
+                EXPECT_EQ(serverDesc->primitive.nextInChain->sType, WGPUSType_Invalid);
+                EXPECT_EQ(serverDesc->primitive.nextInChain->next, nullptr);
+                return api.GetNewRenderPipeline();
+            }));
+    FlushClient();
+}
 
-    // Test that if both an invalid and valid stype are passed on the chain, only the invalid
-    // sType passes through as Invalid.
-    TEST_F(WireExtensionTests, ValidAndInvalidSTypeInChain) {
-        WGPUShaderModuleDescriptor shaderModuleDesc = {};
-        WGPUShaderModule apiShaderModule = api.GetNewShaderModule();
-        WGPUShaderModule shaderModule = wgpuDeviceCreateShaderModule(device, &shaderModuleDesc);
-        EXPECT_CALL(api, DeviceCreateShaderModule(apiDevice, _)).WillOnce(Return(apiShaderModule));
-        FlushClient();
+// Test that if both an invalid and valid stype are passed on the chain, only the invalid
+// sType passes through as Invalid.
+TEST_F(WireExtensionTests, ValidAndInvalidSTypeInChain) {
+    WGPUShaderModuleDescriptor shaderModuleDesc = {};
+    WGPUShaderModule apiShaderModule = api.GetNewShaderModule();
+    WGPUShaderModule shaderModule = wgpuDeviceCreateShaderModule(device, &shaderModuleDesc);
+    EXPECT_CALL(api, DeviceCreateShaderModule(apiDevice, _)).WillOnce(Return(apiShaderModule));
+    FlushClient();
 
-        WGPUPrimitiveDepthClampingState clientExt2 = {};
-        clientExt2.chain.sType = WGPUSType_Invalid;
-        clientExt2.chain.next = nullptr;
+    WGPUPrimitiveDepthClampingState clientExt2 = {};
+    clientExt2.chain.sType = WGPUSType_Invalid;
+    clientExt2.chain.next = nullptr;
 
-        WGPUPrimitiveDepthClampingState clientExt1 = {};
-        clientExt1.chain.sType = WGPUSType_PrimitiveDepthClampingState;
-        clientExt1.chain.next = &clientExt2.chain;
-        clientExt1.clampDepth = true;
+    WGPUPrimitiveDepthClampingState clientExt1 = {};
+    clientExt1.chain.sType = WGPUSType_PrimitiveDepthClampingState;
+    clientExt1.chain.next = &clientExt2.chain;
+    clientExt1.clampDepth = true;
 
-        WGPURenderPipelineDescriptor renderPipelineDesc = {};
-        renderPipelineDesc.vertex.module = shaderModule;
-        renderPipelineDesc.vertex.entryPoint = "main";
-        renderPipelineDesc.primitive.nextInChain = &clientExt1.chain;
+    WGPURenderPipelineDescriptor renderPipelineDesc = {};
+    renderPipelineDesc.vertex.module = shaderModule;
+    renderPipelineDesc.vertex.entryPoint = "main";
+    renderPipelineDesc.primitive.nextInChain = &clientExt1.chain;
 
-        wgpuDeviceCreateRenderPipeline(device, &renderPipelineDesc);
-        EXPECT_CALL(api, DeviceCreateRenderPipeline(apiDevice, NotNull()))
-            .WillOnce(Invoke(
-                [&](Unused, const WGPURenderPipelineDescriptor* serverDesc) -> WGPURenderPipeline {
-                    const auto* ext = reinterpret_cast<const WGPUPrimitiveDepthClampingState*>(
-                        serverDesc->primitive.nextInChain);
-                    EXPECT_EQ(ext->chain.sType, clientExt1.chain.sType);
-                    EXPECT_EQ(ext->clampDepth, true);
+    wgpuDeviceCreateRenderPipeline(device, &renderPipelineDesc);
+    EXPECT_CALL(api, DeviceCreateRenderPipeline(apiDevice, NotNull()))
+        .WillOnce(Invoke(
+            [&](Unused, const WGPURenderPipelineDescriptor* serverDesc) -> WGPURenderPipeline {
+                const auto* ext = reinterpret_cast<const WGPUPrimitiveDepthClampingState*>(
+                    serverDesc->primitive.nextInChain);
+                EXPECT_EQ(ext->chain.sType, clientExt1.chain.sType);
+                EXPECT_EQ(ext->clampDepth, true);
 
-                    EXPECT_EQ(ext->chain.next->sType, WGPUSType_Invalid);
-                    EXPECT_EQ(ext->chain.next->next, nullptr);
-                    return api.GetNewRenderPipeline();
-                }));
-        FlushClient();
+                EXPECT_EQ(ext->chain.next->sType, WGPUSType_Invalid);
+                EXPECT_EQ(ext->chain.next->next, nullptr);
+                return api.GetNewRenderPipeline();
+            }));
+    FlushClient();
 
-        // Swap the order of the chained structs.
-        renderPipelineDesc.primitive.nextInChain = &clientExt2.chain;
-        clientExt2.chain.next = &clientExt1.chain;
-        clientExt1.chain.next = nullptr;
+    // Swap the order of the chained structs.
+    renderPipelineDesc.primitive.nextInChain = &clientExt2.chain;
+    clientExt2.chain.next = &clientExt1.chain;
+    clientExt1.chain.next = nullptr;
 
-        wgpuDeviceCreateRenderPipeline(device, &renderPipelineDesc);
-        EXPECT_CALL(api, DeviceCreateRenderPipeline(apiDevice, NotNull()))
-            .WillOnce(Invoke(
-                [&](Unused, const WGPURenderPipelineDescriptor* serverDesc) -> WGPURenderPipeline {
-                    EXPECT_EQ(serverDesc->primitive.nextInChain->sType, WGPUSType_Invalid);
+    wgpuDeviceCreateRenderPipeline(device, &renderPipelineDesc);
+    EXPECT_CALL(api, DeviceCreateRenderPipeline(apiDevice, NotNull()))
+        .WillOnce(Invoke(
+            [&](Unused, const WGPURenderPipelineDescriptor* serverDesc) -> WGPURenderPipeline {
+                EXPECT_EQ(serverDesc->primitive.nextInChain->sType, WGPUSType_Invalid);
 
-                    const auto* ext = reinterpret_cast<const WGPUPrimitiveDepthClampingState*>(
-                        serverDesc->primitive.nextInChain->next);
-                    EXPECT_EQ(ext->chain.sType, clientExt1.chain.sType);
-                    EXPECT_EQ(ext->clampDepth, true);
-                    EXPECT_EQ(ext->chain.next, nullptr);
+                const auto* ext = reinterpret_cast<const WGPUPrimitiveDepthClampingState*>(
+                    serverDesc->primitive.nextInChain->next);
+                EXPECT_EQ(ext->chain.sType, clientExt1.chain.sType);
+                EXPECT_EQ(ext->clampDepth, true);
+                EXPECT_EQ(ext->chain.next, nullptr);
 
-                    return api.GetNewRenderPipeline();
-                }));
-        FlushClient();
-    }
+                return api.GetNewRenderPipeline();
+            }));
+    FlushClient();
+}
 
 }  // namespace dawn::wire
diff --git a/src/dawn/tests/unittests/wire/WireInjectDeviceTests.cpp b/src/dawn/tests/unittests/wire/WireInjectDeviceTests.cpp
index e12bdd6..f2a50da 100644
--- a/src/dawn/tests/unittests/wire/WireInjectDeviceTests.cpp
+++ b/src/dawn/tests/unittests/wire/WireInjectDeviceTests.cpp
@@ -19,271 +19,259 @@
 
 namespace dawn::wire {
 
-    using testing::_;
-    using testing::Exactly;
-    using testing::Mock;
-    using testing::Return;
+using testing::_;
+using testing::Exactly;
+using testing::Mock;
+using testing::Return;
 
-    class WireInjectDeviceTests : public WireTest {
-      public:
-        WireInjectDeviceTests() {
-        }
-        ~WireInjectDeviceTests() override = default;
-    };
+class WireInjectDeviceTests : public WireTest {
+  public:
+    WireInjectDeviceTests() {}
+    ~WireInjectDeviceTests() override = default;
+};
 
-    // Test that reserving and injecting a device makes calls on the client object forward to the
-    // server object correctly.
-    TEST_F(WireInjectDeviceTests, CallAfterReserveInject) {
+// Test that reserving and injecting a device makes calls on the client object forward to the
+// server object correctly.
+TEST_F(WireInjectDeviceTests, CallAfterReserveInject) {
+    ReservedDevice reservation = GetWireClient()->ReserveDevice();
+
+    WGPUDevice serverDevice = api.GetNewDevice();
+    EXPECT_CALL(api, DeviceReference(serverDevice));
+    EXPECT_CALL(api, OnDeviceSetUncapturedErrorCallback(serverDevice, _, _));
+    EXPECT_CALL(api, OnDeviceSetLoggingCallback(serverDevice, _, _));
+    EXPECT_CALL(api, OnDeviceSetDeviceLostCallback(serverDevice, _, _));
+    ASSERT_TRUE(
+        GetWireServer()->InjectDevice(serverDevice, reservation.id, reservation.generation));
+
+    WGPUBufferDescriptor bufferDesc = {};
+    wgpuDeviceCreateBuffer(reservation.device, &bufferDesc);
+    WGPUBuffer serverBuffer = api.GetNewBuffer();
+    EXPECT_CALL(api, DeviceCreateBuffer(serverDevice, _)).WillOnce(Return(serverBuffer));
+    FlushClient();
+
+    // Called on shutdown.
+    EXPECT_CALL(api, OnDeviceSetUncapturedErrorCallback(serverDevice, nullptr, nullptr))
+        .Times(Exactly(1));
+    EXPECT_CALL(api, OnDeviceSetLoggingCallback(serverDevice, nullptr, nullptr)).Times(Exactly(1));
+    EXPECT_CALL(api, OnDeviceSetDeviceLostCallback(serverDevice, nullptr, nullptr))
+        .Times(Exactly(1));
+}
+
+// Test that reserve correctly returns different IDs each time.
+TEST_F(WireInjectDeviceTests, ReserveDifferentIDs) {
+    ReservedDevice reservation1 = GetWireClient()->ReserveDevice();
+    ReservedDevice reservation2 = GetWireClient()->ReserveDevice();
+
+    ASSERT_NE(reservation1.id, reservation2.id);
+    ASSERT_NE(reservation1.device, reservation2.device);
+}
+
+// Test that injecting the same id without a destroy first fails.
+TEST_F(WireInjectDeviceTests, InjectExistingID) {
+    ReservedDevice reservation = GetWireClient()->ReserveDevice();
+
+    WGPUDevice serverDevice = api.GetNewDevice();
+    EXPECT_CALL(api, DeviceReference(serverDevice));
+    EXPECT_CALL(api, OnDeviceSetUncapturedErrorCallback(serverDevice, _, _));
+    EXPECT_CALL(api, OnDeviceSetLoggingCallback(serverDevice, _, _));
+    EXPECT_CALL(api, OnDeviceSetDeviceLostCallback(serverDevice, _, _));
+    ASSERT_TRUE(
+        GetWireServer()->InjectDevice(serverDevice, reservation.id, reservation.generation));
+
+    // ID already in use, call fails.
+    ASSERT_FALSE(
+        GetWireServer()->InjectDevice(serverDevice, reservation.id, reservation.generation));
+
+    // Called on shutdown.
+    EXPECT_CALL(api, OnDeviceSetUncapturedErrorCallback(serverDevice, nullptr, nullptr))
+        .Times(Exactly(1));
+    EXPECT_CALL(api, OnDeviceSetLoggingCallback(serverDevice, nullptr, nullptr)).Times(Exactly(1));
+    EXPECT_CALL(api, OnDeviceSetDeviceLostCallback(serverDevice, nullptr, nullptr))
+        .Times(Exactly(1));
+}
+
+// Test that the server only borrows the device and does a single reference-release
+TEST_F(WireInjectDeviceTests, InjectedDeviceLifetime) {
+    ReservedDevice reservation = GetWireClient()->ReserveDevice();
+
+    // Injecting the device adds a reference
+    WGPUDevice serverDevice = api.GetNewDevice();
+    EXPECT_CALL(api, DeviceReference(serverDevice));
+    EXPECT_CALL(api, OnDeviceSetUncapturedErrorCallback(serverDevice, _, _));
+    EXPECT_CALL(api, OnDeviceSetLoggingCallback(serverDevice, _, _));
+    EXPECT_CALL(api, OnDeviceSetDeviceLostCallback(serverDevice, _, _));
+    ASSERT_TRUE(
+        GetWireServer()->InjectDevice(serverDevice, reservation.id, reservation.generation));
+
+    // Releasing the device removes a single reference and clears its error callbacks.
+    wgpuDeviceRelease(reservation.device);
+    EXPECT_CALL(api, DeviceRelease(serverDevice));
+    EXPECT_CALL(api, OnDeviceSetUncapturedErrorCallback(serverDevice, nullptr, nullptr)).Times(1);
+    EXPECT_CALL(api, OnDeviceSetLoggingCallback(serverDevice, nullptr, nullptr)).Times(1);
+    EXPECT_CALL(api, OnDeviceSetDeviceLostCallback(serverDevice, nullptr, nullptr)).Times(1);
+    FlushClient();
+
+    // Deleting the server doesn't release a second reference.
+    DeleteServer();
+    Mock::VerifyAndClearExpectations(&api);
+}
+
+// Test that it is an error to get the primary queue of a device before it has been
+// injected on the server.
+TEST_F(WireInjectDeviceTests, GetQueueBeforeInject) {
+    ReservedDevice reservation = GetWireClient()->ReserveDevice();
+
+    wgpuDeviceGetQueue(reservation.device);
+    FlushClient(false);
+}
+
+// Test that it is valid to get the primary queue of a device after it has been
+// injected on the server.
+TEST_F(WireInjectDeviceTests, GetQueueAfterInject) {
+    ReservedDevice reservation = GetWireClient()->ReserveDevice();
+
+    WGPUDevice serverDevice = api.GetNewDevice();
+    EXPECT_CALL(api, DeviceReference(serverDevice));
+    EXPECT_CALL(api, OnDeviceSetUncapturedErrorCallback(serverDevice, _, _));
+    EXPECT_CALL(api, OnDeviceSetLoggingCallback(serverDevice, _, _));
+    EXPECT_CALL(api, OnDeviceSetDeviceLostCallback(serverDevice, _, _));
+    ASSERT_TRUE(
+        GetWireServer()->InjectDevice(serverDevice, reservation.id, reservation.generation));
+
+    wgpuDeviceGetQueue(reservation.device);
+
+    WGPUQueue apiQueue = api.GetNewQueue();
+    EXPECT_CALL(api, DeviceGetQueue(serverDevice)).WillOnce(Return(apiQueue));
+    FlushClient();
+
+    // Called on shutdown.
+    EXPECT_CALL(api, OnDeviceSetUncapturedErrorCallback(serverDevice, nullptr, nullptr))
+        .Times(Exactly(1));
+    EXPECT_CALL(api, OnDeviceSetLoggingCallback(serverDevice, nullptr, nullptr)).Times(Exactly(1));
+    EXPECT_CALL(api, OnDeviceSetDeviceLostCallback(serverDevice, nullptr, nullptr))
+        .Times(Exactly(1));
+}
+
+// Test that the list of live devices can be reflected using GetDevice.
+TEST_F(WireInjectDeviceTests, ReflectLiveDevices) {
+    // Reserve two devices.
+    ReservedDevice reservation1 = GetWireClient()->ReserveDevice();
+    ReservedDevice reservation2 = GetWireClient()->ReserveDevice();
+
+    // Inject both devices.
+
+    WGPUDevice serverDevice1 = api.GetNewDevice();
+    EXPECT_CALL(api, DeviceReference(serverDevice1));
+    EXPECT_CALL(api, OnDeviceSetUncapturedErrorCallback(serverDevice1, _, _));
+    EXPECT_CALL(api, OnDeviceSetLoggingCallback(serverDevice1, _, _));
+    EXPECT_CALL(api, OnDeviceSetDeviceLostCallback(serverDevice1, _, _));
+    ASSERT_TRUE(
+        GetWireServer()->InjectDevice(serverDevice1, reservation1.id, reservation1.generation));
+
+    WGPUDevice serverDevice2 = api.GetNewDevice();
+    EXPECT_CALL(api, DeviceReference(serverDevice2));
+    EXPECT_CALL(api, OnDeviceSetUncapturedErrorCallback(serverDevice2, _, _));
+    EXPECT_CALL(api, OnDeviceSetLoggingCallback(serverDevice2, _, _));
+    EXPECT_CALL(api, OnDeviceSetDeviceLostCallback(serverDevice2, _, _));
+    ASSERT_TRUE(
+        GetWireServer()->InjectDevice(serverDevice2, reservation2.id, reservation2.generation));
+
+    // Test that both devices can be reflected.
+    ASSERT_EQ(serverDevice1, GetWireServer()->GetDevice(reservation1.id, reservation1.generation));
+    ASSERT_EQ(serverDevice2, GetWireServer()->GetDevice(reservation2.id, reservation2.generation));
+
+    // Release the first device
+    wgpuDeviceRelease(reservation1.device);
+    EXPECT_CALL(api, DeviceRelease(serverDevice1));
+    EXPECT_CALL(api, OnDeviceSetUncapturedErrorCallback(serverDevice1, nullptr, nullptr)).Times(1);
+    EXPECT_CALL(api, OnDeviceSetLoggingCallback(serverDevice1, nullptr, nullptr)).Times(1);
+    EXPECT_CALL(api, OnDeviceSetDeviceLostCallback(serverDevice1, nullptr, nullptr)).Times(1);
+    FlushClient();
+
+    // The first device should no longer reflect, but the second should
+    ASSERT_EQ(nullptr, GetWireServer()->GetDevice(reservation1.id, reservation1.generation));
+    ASSERT_EQ(serverDevice2, GetWireServer()->GetDevice(reservation2.id, reservation2.generation));
+
+    // Called on shutdown.
+    EXPECT_CALL(api, OnDeviceSetUncapturedErrorCallback(serverDevice2, nullptr, nullptr)).Times(1);
+    EXPECT_CALL(api, OnDeviceSetLoggingCallback(serverDevice2, nullptr, nullptr)).Times(1);
+    EXPECT_CALL(api, OnDeviceSetDeviceLostCallback(serverDevice2, nullptr, nullptr)).Times(1);
+}
+
+// This is a regression test where a second device reservation invalidated pointers into the
+// KnownObjects std::vector of devices. The fix was to store pointers to heap allocated
+// objects instead.
+TEST_F(WireInjectDeviceTests, TrackChildObjectsWithTwoReservedDevices) {
+    // Reserve one device, inject it, and get the primary queue.
+    ReservedDevice reservation1 = GetWireClient()->ReserveDevice();
+
+    WGPUDevice serverDevice1 = api.GetNewDevice();
+    EXPECT_CALL(api, DeviceReference(serverDevice1));
+    EXPECT_CALL(api, OnDeviceSetUncapturedErrorCallback(serverDevice1, _, _));
+    EXPECT_CALL(api, OnDeviceSetLoggingCallback(serverDevice1, _, _));
+    EXPECT_CALL(api, OnDeviceSetDeviceLostCallback(serverDevice1, _, _));
+    ASSERT_TRUE(
+        GetWireServer()->InjectDevice(serverDevice1, reservation1.id, reservation1.generation));
+
+    WGPUCommandEncoder commandEncoder =
+        wgpuDeviceCreateCommandEncoder(reservation1.device, nullptr);
+
+    WGPUCommandEncoder serverCommandEncoder = api.GetNewCommandEncoder();
+    EXPECT_CALL(api, DeviceCreateCommandEncoder(serverDevice1, _))
+        .WillOnce(Return(serverCommandEncoder));
+    FlushClient();
+
+    // Reserve a second device, and inject it.
+    ReservedDevice reservation2 = GetWireClient()->ReserveDevice();
+
+    WGPUDevice serverDevice2 = api.GetNewDevice();
+    EXPECT_CALL(api, DeviceReference(serverDevice2));
+    EXPECT_CALL(api, OnDeviceSetUncapturedErrorCallback(serverDevice2, _, _));
+    EXPECT_CALL(api, OnDeviceSetLoggingCallback(serverDevice2, _, _));
+    EXPECT_CALL(api, OnDeviceSetDeviceLostCallback(serverDevice2, _, _));
+    ASSERT_TRUE(
+        GetWireServer()->InjectDevice(serverDevice2, reservation2.id, reservation2.generation));
+
+    // Release the encoder. This should work without error because it stores a stable
+    // pointer to its device's list of child objects. On destruction, it removes itself from the
+    // list.
+    wgpuCommandEncoderRelease(commandEncoder);
+    EXPECT_CALL(api, CommandEncoderRelease(serverCommandEncoder));
+    FlushClient();
+
+    // Called on shutdown.
+    EXPECT_CALL(api, OnDeviceSetUncapturedErrorCallback(serverDevice1, nullptr, nullptr)).Times(1);
+    EXPECT_CALL(api, OnDeviceSetLoggingCallback(serverDevice1, nullptr, nullptr)).Times(1);
+    EXPECT_CALL(api, OnDeviceSetDeviceLostCallback(serverDevice1, nullptr, nullptr)).Times(1);
+    EXPECT_CALL(api, OnDeviceSetUncapturedErrorCallback(serverDevice2, nullptr, nullptr)).Times(1);
+    EXPECT_CALL(api, OnDeviceSetLoggingCallback(serverDevice2, nullptr, nullptr)).Times(1);
+    EXPECT_CALL(api, OnDeviceSetDeviceLostCallback(serverDevice2, nullptr, nullptr)).Times(1);
+}
+
+// Test that a device reservation can be reclaimed. This is necessary to
+// avoid leaking ObjectIDs for reservations that are never injected.
+TEST_F(WireInjectDeviceTests, ReclaimDeviceReservation) {
+    // Test that doing a reservation and full release is an error.
+    {
         ReservedDevice reservation = GetWireClient()->ReserveDevice();
-
-        WGPUDevice serverDevice = api.GetNewDevice();
-        EXPECT_CALL(api, DeviceReference(serverDevice));
-        EXPECT_CALL(api, OnDeviceSetUncapturedErrorCallback(serverDevice, _, _));
-        EXPECT_CALL(api, OnDeviceSetLoggingCallback(serverDevice, _, _));
-        EXPECT_CALL(api, OnDeviceSetDeviceLostCallback(serverDevice, _, _));
-        ASSERT_TRUE(
-            GetWireServer()->InjectDevice(serverDevice, reservation.id, reservation.generation));
-
-        WGPUBufferDescriptor bufferDesc = {};
-        wgpuDeviceCreateBuffer(reservation.device, &bufferDesc);
-        WGPUBuffer serverBuffer = api.GetNewBuffer();
-        EXPECT_CALL(api, DeviceCreateBuffer(serverDevice, _)).WillOnce(Return(serverBuffer));
-        FlushClient();
-
-        // Called on shutdown.
-        EXPECT_CALL(api, OnDeviceSetUncapturedErrorCallback(serverDevice, nullptr, nullptr))
-            .Times(Exactly(1));
-        EXPECT_CALL(api, OnDeviceSetLoggingCallback(serverDevice, nullptr, nullptr))
-            .Times(Exactly(1));
-        EXPECT_CALL(api, OnDeviceSetDeviceLostCallback(serverDevice, nullptr, nullptr))
-            .Times(Exactly(1));
-    }
-
-    // Test that reserve correctly returns different IDs each time.
-    TEST_F(WireInjectDeviceTests, ReserveDifferentIDs) {
-        ReservedDevice reservation1 = GetWireClient()->ReserveDevice();
-        ReservedDevice reservation2 = GetWireClient()->ReserveDevice();
-
-        ASSERT_NE(reservation1.id, reservation2.id);
-        ASSERT_NE(reservation1.device, reservation2.device);
-    }
-
-    // Test that injecting the same id without a destroy first fails.
-    TEST_F(WireInjectDeviceTests, InjectExistingID) {
-        ReservedDevice reservation = GetWireClient()->ReserveDevice();
-
-        WGPUDevice serverDevice = api.GetNewDevice();
-        EXPECT_CALL(api, DeviceReference(serverDevice));
-        EXPECT_CALL(api, OnDeviceSetUncapturedErrorCallback(serverDevice, _, _));
-        EXPECT_CALL(api, OnDeviceSetLoggingCallback(serverDevice, _, _));
-        EXPECT_CALL(api, OnDeviceSetDeviceLostCallback(serverDevice, _, _));
-        ASSERT_TRUE(
-            GetWireServer()->InjectDevice(serverDevice, reservation.id, reservation.generation));
-
-        // ID already in use, call fails.
-        ASSERT_FALSE(
-            GetWireServer()->InjectDevice(serverDevice, reservation.id, reservation.generation));
-
-        // Called on shutdown.
-        EXPECT_CALL(api, OnDeviceSetUncapturedErrorCallback(serverDevice, nullptr, nullptr))
-            .Times(Exactly(1));
-        EXPECT_CALL(api, OnDeviceSetLoggingCallback(serverDevice, nullptr, nullptr))
-            .Times(Exactly(1));
-        EXPECT_CALL(api, OnDeviceSetDeviceLostCallback(serverDevice, nullptr, nullptr))
-            .Times(Exactly(1));
-    }
-
-    // Test that the server only borrows the device and does a single reference-release
-    TEST_F(WireInjectDeviceTests, InjectedDeviceLifetime) {
-        ReservedDevice reservation = GetWireClient()->ReserveDevice();
-
-        // Injecting the device adds a reference
-        WGPUDevice serverDevice = api.GetNewDevice();
-        EXPECT_CALL(api, DeviceReference(serverDevice));
-        EXPECT_CALL(api, OnDeviceSetUncapturedErrorCallback(serverDevice, _, _));
-        EXPECT_CALL(api, OnDeviceSetLoggingCallback(serverDevice, _, _));
-        EXPECT_CALL(api, OnDeviceSetDeviceLostCallback(serverDevice, _, _));
-        ASSERT_TRUE(
-            GetWireServer()->InjectDevice(serverDevice, reservation.id, reservation.generation));
-
-        // Releasing the device removes a single reference and clears its error callbacks.
         wgpuDeviceRelease(reservation.device);
-        EXPECT_CALL(api, DeviceRelease(serverDevice));
-        EXPECT_CALL(api, OnDeviceSetUncapturedErrorCallback(serverDevice, nullptr, nullptr))
-            .Times(1);
-        EXPECT_CALL(api, OnDeviceSetLoggingCallback(serverDevice, nullptr, nullptr)).Times(1);
-        EXPECT_CALL(api, OnDeviceSetDeviceLostCallback(serverDevice, nullptr, nullptr)).Times(1);
-        FlushClient();
-
-        // Deleting the server doesn't release a second reference.
-        DeleteServer();
-        Mock::VerifyAndClearExpectations(&api);
-    }
-
-    // Test that it is an error to get the primary queue of a device before it has been
-    // injected on the server.
-    TEST_F(WireInjectDeviceTests, GetQueueBeforeInject) {
-        ReservedDevice reservation = GetWireClient()->ReserveDevice();
-
-        wgpuDeviceGetQueue(reservation.device);
         FlushClient(false);
     }
 
-    // Test that it is valid to get the primary queue of a device after it has been
-    // injected on the server.
-    TEST_F(WireInjectDeviceTests, GetQueueAfterInject) {
-        ReservedDevice reservation = GetWireClient()->ReserveDevice();
-
-        WGPUDevice serverDevice = api.GetNewDevice();
-        EXPECT_CALL(api, DeviceReference(serverDevice));
-        EXPECT_CALL(api, OnDeviceSetUncapturedErrorCallback(serverDevice, _, _));
-        EXPECT_CALL(api, OnDeviceSetLoggingCallback(serverDevice, _, _));
-        EXPECT_CALL(api, OnDeviceSetDeviceLostCallback(serverDevice, _, _));
-        ASSERT_TRUE(
-            GetWireServer()->InjectDevice(serverDevice, reservation.id, reservation.generation));
-
-        wgpuDeviceGetQueue(reservation.device);
-
-        WGPUQueue apiQueue = api.GetNewQueue();
-        EXPECT_CALL(api, DeviceGetQueue(serverDevice)).WillOnce(Return(apiQueue));
-        FlushClient();
-
-        // Called on shutdown.
-        EXPECT_CALL(api, OnDeviceSetUncapturedErrorCallback(serverDevice, nullptr, nullptr))
-            .Times(Exactly(1));
-        EXPECT_CALL(api, OnDeviceSetLoggingCallback(serverDevice, nullptr, nullptr))
-            .Times(Exactly(1));
-        EXPECT_CALL(api, OnDeviceSetDeviceLostCallback(serverDevice, nullptr, nullptr))
-            .Times(Exactly(1));
-    }
-
-    // Test that the list of live devices can be reflected using GetDevice.
-    TEST_F(WireInjectDeviceTests, ReflectLiveDevices) {
-        // Reserve two devices.
+    // Test that doing a reservation and then reclaiming it recycles the ID.
+    {
         ReservedDevice reservation1 = GetWireClient()->ReserveDevice();
+        GetWireClient()->ReclaimDeviceReservation(reservation1);
+
         ReservedDevice reservation2 = GetWireClient()->ReserveDevice();
 
-        // Inject both devices.
+        // The ID is the same, but the generation is still different.
+        ASSERT_EQ(reservation1.id, reservation2.id);
+        ASSERT_NE(reservation1.generation, reservation2.generation);
 
-        WGPUDevice serverDevice1 = api.GetNewDevice();
-        EXPECT_CALL(api, DeviceReference(serverDevice1));
-        EXPECT_CALL(api, OnDeviceSetUncapturedErrorCallback(serverDevice1, _, _));
-        EXPECT_CALL(api, OnDeviceSetLoggingCallback(serverDevice1, _, _));
-        EXPECT_CALL(api, OnDeviceSetDeviceLostCallback(serverDevice1, _, _));
-        ASSERT_TRUE(
-            GetWireServer()->InjectDevice(serverDevice1, reservation1.id, reservation1.generation));
-
-        WGPUDevice serverDevice2 = api.GetNewDevice();
-        EXPECT_CALL(api, DeviceReference(serverDevice2));
-        EXPECT_CALL(api, OnDeviceSetUncapturedErrorCallback(serverDevice2, _, _));
-        EXPECT_CALL(api, OnDeviceSetLoggingCallback(serverDevice2, _, _));
-        EXPECT_CALL(api, OnDeviceSetDeviceLostCallback(serverDevice2, _, _));
-        ASSERT_TRUE(
-            GetWireServer()->InjectDevice(serverDevice2, reservation2.id, reservation2.generation));
-
-        // Test that both devices can be reflected.
-        ASSERT_EQ(serverDevice1,
-                  GetWireServer()->GetDevice(reservation1.id, reservation1.generation));
-        ASSERT_EQ(serverDevice2,
-                  GetWireServer()->GetDevice(reservation2.id, reservation2.generation));
-
-        // Release the first device
-        wgpuDeviceRelease(reservation1.device);
-        EXPECT_CALL(api, DeviceRelease(serverDevice1));
-        EXPECT_CALL(api, OnDeviceSetUncapturedErrorCallback(serverDevice1, nullptr, nullptr))
-            .Times(1);
-        EXPECT_CALL(api, OnDeviceSetLoggingCallback(serverDevice1, nullptr, nullptr)).Times(1);
-        EXPECT_CALL(api, OnDeviceSetDeviceLostCallback(serverDevice1, nullptr, nullptr)).Times(1);
+        // No errors should occur.
         FlushClient();
-
-        // The first device should no longer reflect, but the second should
-        ASSERT_EQ(nullptr, GetWireServer()->GetDevice(reservation1.id, reservation1.generation));
-        ASSERT_EQ(serverDevice2,
-                  GetWireServer()->GetDevice(reservation2.id, reservation2.generation));
-
-        // Called on shutdown.
-        EXPECT_CALL(api, OnDeviceSetUncapturedErrorCallback(serverDevice2, nullptr, nullptr))
-            .Times(1);
-        EXPECT_CALL(api, OnDeviceSetLoggingCallback(serverDevice2, nullptr, nullptr)).Times(1);
-        EXPECT_CALL(api, OnDeviceSetDeviceLostCallback(serverDevice2, nullptr, nullptr)).Times(1);
     }
-
-    // This is a regression test where a second device reservation invalidated pointers into the
-    // KnownObjects std::vector of devices. The fix was to store pointers to heap allocated
-    // objects instead.
-    TEST_F(WireInjectDeviceTests, TrackChildObjectsWithTwoReservedDevices) {
-        // Reserve one device, inject it, and get the primary queue.
-        ReservedDevice reservation1 = GetWireClient()->ReserveDevice();
-
-        WGPUDevice serverDevice1 = api.GetNewDevice();
-        EXPECT_CALL(api, DeviceReference(serverDevice1));
-        EXPECT_CALL(api, OnDeviceSetUncapturedErrorCallback(serverDevice1, _, _));
-        EXPECT_CALL(api, OnDeviceSetLoggingCallback(serverDevice1, _, _));
-        EXPECT_CALL(api, OnDeviceSetDeviceLostCallback(serverDevice1, _, _));
-        ASSERT_TRUE(
-            GetWireServer()->InjectDevice(serverDevice1, reservation1.id, reservation1.generation));
-
-        WGPUCommandEncoder commandEncoder =
-            wgpuDeviceCreateCommandEncoder(reservation1.device, nullptr);
-
-        WGPUCommandEncoder serverCommandEncoder = api.GetNewCommandEncoder();
-        EXPECT_CALL(api, DeviceCreateCommandEncoder(serverDevice1, _))
-            .WillOnce(Return(serverCommandEncoder));
-        FlushClient();
-
-        // Reserve a second device, and inject it.
-        ReservedDevice reservation2 = GetWireClient()->ReserveDevice();
-
-        WGPUDevice serverDevice2 = api.GetNewDevice();
-        EXPECT_CALL(api, DeviceReference(serverDevice2));
-        EXPECT_CALL(api, OnDeviceSetUncapturedErrorCallback(serverDevice2, _, _));
-        EXPECT_CALL(api, OnDeviceSetLoggingCallback(serverDevice2, _, _));
-        EXPECT_CALL(api, OnDeviceSetDeviceLostCallback(serverDevice2, _, _));
-        ASSERT_TRUE(
-            GetWireServer()->InjectDevice(serverDevice2, reservation2.id, reservation2.generation));
-
-        // Release the encoder. This should work without error because it stores a stable
-        // pointer to its device's list of child objects. On destruction, it removes itself from the
-        // list.
-        wgpuCommandEncoderRelease(commandEncoder);
-        EXPECT_CALL(api, CommandEncoderRelease(serverCommandEncoder));
-        FlushClient();
-
-        // Called on shutdown.
-        EXPECT_CALL(api, OnDeviceSetUncapturedErrorCallback(serverDevice1, nullptr, nullptr))
-            .Times(1);
-        EXPECT_CALL(api, OnDeviceSetLoggingCallback(serverDevice1, nullptr, nullptr)).Times(1);
-        EXPECT_CALL(api, OnDeviceSetDeviceLostCallback(serverDevice1, nullptr, nullptr)).Times(1);
-        EXPECT_CALL(api, OnDeviceSetUncapturedErrorCallback(serverDevice2, nullptr, nullptr))
-            .Times(1);
-        EXPECT_CALL(api, OnDeviceSetLoggingCallback(serverDevice2, nullptr, nullptr)).Times(1);
-        EXPECT_CALL(api, OnDeviceSetDeviceLostCallback(serverDevice2, nullptr, nullptr)).Times(1);
-    }
-
-    // Test that a device reservation can be reclaimed. This is necessary to
-    // avoid leaking ObjectIDs for reservations that are never injected.
-    TEST_F(WireInjectDeviceTests, ReclaimDeviceReservation) {
-        // Test that doing a reservation and full release is an error.
-        {
-            ReservedDevice reservation = GetWireClient()->ReserveDevice();
-            wgpuDeviceRelease(reservation.device);
-            FlushClient(false);
-        }
-
-        // Test that doing a reservation and then reclaiming it recycles the ID.
-        {
-            ReservedDevice reservation1 = GetWireClient()->ReserveDevice();
-            GetWireClient()->ReclaimDeviceReservation(reservation1);
-
-            ReservedDevice reservation2 = GetWireClient()->ReserveDevice();
-
-            // The ID is the same, but the generation is still different.
-            ASSERT_EQ(reservation1.id, reservation2.id);
-            ASSERT_NE(reservation1.generation, reservation2.generation);
-
-            // No errors should occur.
-            FlushClient();
-        }
-    }
+}
 
 }  // namespace dawn::wire
diff --git a/src/dawn/tests/unittests/wire/WireInjectInstanceTests.cpp b/src/dawn/tests/unittests/wire/WireInjectInstanceTests.cpp
index 64c8b1b..f9fdc52 100644
--- a/src/dawn/tests/unittests/wire/WireInjectInstanceTests.cpp
+++ b/src/dawn/tests/unittests/wire/WireInjectInstanceTests.cpp
@@ -17,106 +17,107 @@
 #include "dawn/wire/WireClient.h"
 #include "dawn/wire/WireServer.h"
 
-namespace dawn::wire { namespace {
+namespace dawn::wire {
+namespace {
 
-    using testing::Mock;
-    using testing::NotNull;
-    using testing::Return;
+using testing::Mock;
+using testing::NotNull;
+using testing::Return;
 
-    class WireInjectInstanceTests : public WireTest {
-      public:
-        WireInjectInstanceTests() {
-        }
-        ~WireInjectInstanceTests() override = default;
-    };
+class WireInjectInstanceTests : public WireTest {
+  public:
+    WireInjectInstanceTests() {}
+    ~WireInjectInstanceTests() override = default;
+};
 
-    // Test that reserving and injecting an instance makes calls on the client object forward to the
-    // server object correctly.
-    TEST_F(WireInjectInstanceTests, CallAfterReserveInject) {
+// Test that reserving and injecting an instance makes calls on the client object forward to the
+// server object correctly.
+TEST_F(WireInjectInstanceTests, CallAfterReserveInject) {
+    ReservedInstance reservation = GetWireClient()->ReserveInstance();
+
+    WGPUInstance serverInstance = api.GetNewInstance();
+    EXPECT_CALL(api, InstanceReference(serverInstance));
+    ASSERT_TRUE(
+        GetWireServer()->InjectInstance(serverInstance, reservation.id, reservation.generation));
+
+    WGPUSurfaceDescriptor surfaceDesc = {};
+    wgpuInstanceCreateSurface(reservation.instance, &surfaceDesc);
+    WGPUSurface serverSurface = api.GetNewSurface();
+    EXPECT_CALL(api, InstanceCreateSurface(serverInstance, NotNull()))
+        .WillOnce(Return(serverSurface));
+    FlushClient();
+}
+
+// Test that reserve correctly returns different IDs each time.
+TEST_F(WireInjectInstanceTests, ReserveDifferentIDs) {
+    ReservedInstance reservation1 = GetWireClient()->ReserveInstance();
+    ReservedInstance reservation2 = GetWireClient()->ReserveInstance();
+
+    ASSERT_NE(reservation1.id, reservation2.id);
+    ASSERT_NE(reservation1.instance, reservation2.instance);
+}
+
+// Test that injecting the same id fails.
+TEST_F(WireInjectInstanceTests, InjectExistingID) {
+    ReservedInstance reservation = GetWireClient()->ReserveInstance();
+
+    WGPUInstance serverInstance = api.GetNewInstance();
+    EXPECT_CALL(api, InstanceReference(serverInstance));
+    ASSERT_TRUE(
+        GetWireServer()->InjectInstance(serverInstance, reservation.id, reservation.generation));
+
+    // ID already in use, call fails.
+    ASSERT_FALSE(
+        GetWireServer()->InjectInstance(serverInstance, reservation.id, reservation.generation));
+}
+
+// Test that the server only borrows the instance and does a single reference-release
+TEST_F(WireInjectInstanceTests, InjectedInstanceLifetime) {
+    ReservedInstance reservation = GetWireClient()->ReserveInstance();
+
+    // Injecting the instance adds a reference
+    WGPUInstance serverInstance = api.GetNewInstance();
+    EXPECT_CALL(api, InstanceReference(serverInstance));
+    ASSERT_TRUE(
+        GetWireServer()->InjectInstance(serverInstance, reservation.id, reservation.generation));
+
+    // Releasing the instance removes a single reference.
+    wgpuInstanceRelease(reservation.instance);
+    EXPECT_CALL(api, InstanceRelease(serverInstance));
+    FlushClient();
+
+    // Deleting the server doesn't release a second reference.
+    DeleteServer();
+    Mock::VerifyAndClearExpectations(&api);
+}
+
+// Test that an instance reservation can be reclaimed. This is necessary to
+// avoid leaking ObjectIDs for reservations that are never injected.
+TEST_F(WireInjectInstanceTests, ReclaimInstanceReservation) {
+    // Test that doing a reservation and full release is an error.
+    {
         ReservedInstance reservation = GetWireClient()->ReserveInstance();
-
-        WGPUInstance serverInstance = api.GetNewInstance();
-        EXPECT_CALL(api, InstanceReference(serverInstance));
-        ASSERT_TRUE(GetWireServer()->InjectInstance(serverInstance, reservation.id,
-                                                    reservation.generation));
-
-        WGPUSurfaceDescriptor surfaceDesc = {};
-        wgpuInstanceCreateSurface(reservation.instance, &surfaceDesc);
-        WGPUSurface serverSurface = api.GetNewSurface();
-        EXPECT_CALL(api, InstanceCreateSurface(serverInstance, NotNull()))
-            .WillOnce(Return(serverSurface));
-        FlushClient();
+        wgpuInstanceRelease(reservation.instance);
+        FlushClient(false);
     }
 
-    // Test that reserve correctly returns different IDs each time.
-    TEST_F(WireInjectInstanceTests, ReserveDifferentIDs) {
+    // Test that doing a reservation and then reclaiming it recycles the ID.
+    {
         ReservedInstance reservation1 = GetWireClient()->ReserveInstance();
+        GetWireClient()->ReclaimInstanceReservation(reservation1);
+
         ReservedInstance reservation2 = GetWireClient()->ReserveInstance();
 
-        ASSERT_NE(reservation1.id, reservation2.id);
-        ASSERT_NE(reservation1.instance, reservation2.instance);
-    }
+        // The ID is the same, but the generation is still different.
+        ASSERT_EQ(reservation1.id, reservation2.id);
+        ASSERT_NE(reservation1.generation, reservation2.generation);
 
-    // Test that injecting the same id fails.
-    TEST_F(WireInjectInstanceTests, InjectExistingID) {
-        ReservedInstance reservation = GetWireClient()->ReserveInstance();
-
-        WGPUInstance serverInstance = api.GetNewInstance();
-        EXPECT_CALL(api, InstanceReference(serverInstance));
-        ASSERT_TRUE(GetWireServer()->InjectInstance(serverInstance, reservation.id,
-                                                    reservation.generation));
-
-        // ID already in use, call fails.
-        ASSERT_FALSE(GetWireServer()->InjectInstance(serverInstance, reservation.id,
-                                                     reservation.generation));
-    }
-
-    // Test that the server only borrows the instance and does a single reference-release
-    TEST_F(WireInjectInstanceTests, InjectedInstanceLifetime) {
-        ReservedInstance reservation = GetWireClient()->ReserveInstance();
-
-        // Injecting the instance adds a reference
-        WGPUInstance serverInstance = api.GetNewInstance();
-        EXPECT_CALL(api, InstanceReference(serverInstance));
-        ASSERT_TRUE(GetWireServer()->InjectInstance(serverInstance, reservation.id,
-                                                    reservation.generation));
-
-        // Releasing the instance removes a single reference.
-        wgpuInstanceRelease(reservation.instance);
-        EXPECT_CALL(api, InstanceRelease(serverInstance));
+        // No errors should occur.
         FlushClient();
-
-        // Deleting the server doesn't release a second reference.
-        DeleteServer();
-        Mock::VerifyAndClearExpectations(&api);
     }
+}
 
-    // Test that a device reservation can be reclaimed. This is necessary to
-    // avoid leaking ObjectIDs for reservations that are never injected.
-    TEST_F(WireInjectInstanceTests, ReclaimInstanceReservation) {
-        // Test that doing a reservation and full release is an error.
-        {
-            ReservedInstance reservation = GetWireClient()->ReserveInstance();
-            wgpuInstanceRelease(reservation.instance);
-            FlushClient(false);
-        }
-
-        // Test that doing a reservation and then reclaiming it recycles the ID.
-        {
-            ReservedInstance reservation1 = GetWireClient()->ReserveInstance();
-            GetWireClient()->ReclaimInstanceReservation(reservation1);
-
-            ReservedInstance reservation2 = GetWireClient()->ReserveInstance();
-
-            // The ID is the same, but the generation is still different.
-            ASSERT_EQ(reservation1.id, reservation2.id);
-            ASSERT_NE(reservation1.generation, reservation2.generation);
-
-            // No errors should occur.
-            FlushClient();
-        }
-    }
-
-    // TODO(https://crbug.com/dawn/1381) Remove when namespaces are not indented.
-    // NOLINTNEXTLINE(readability/namespace)
-}}  // namespace dawn::wire::
+// TODO(https://crbug.com/dawn/1381) Remove this suppression once cpplint accepts the un-indented namespace style.
+// NOLINTNEXTLINE(readability/namespace)
+}  // namespace
+}  // namespace dawn::wire
diff --git a/src/dawn/tests/unittests/wire/WireInjectSwapChainTests.cpp b/src/dawn/tests/unittests/wire/WireInjectSwapChainTests.cpp
index 6ba058b..f06d53e 100644
--- a/src/dawn/tests/unittests/wire/WireInjectSwapChainTests.cpp
+++ b/src/dawn/tests/unittests/wire/WireInjectSwapChainTests.cpp
@@ -19,101 +19,100 @@
 
 namespace dawn::wire {
 
-    using testing::Mock;
+using testing::Mock;
 
-    class WireInjectSwapChainTests : public WireTest {
-      public:
-        WireInjectSwapChainTests() {
-        }
-        ~WireInjectSwapChainTests() override = default;
-    };
+class WireInjectSwapChainTests : public WireTest {
+  public:
+    WireInjectSwapChainTests() {}
+    ~WireInjectSwapChainTests() override = default;
+};
 
-    // Test that reserving and injecting a swapchain makes calls on the client object forward to the
-    // server object correctly.
-    TEST_F(WireInjectSwapChainTests, CallAfterReserveInject) {
+// Test that reserving and injecting a swapchain makes calls on the client object forward to the
+// server object correctly.
+TEST_F(WireInjectSwapChainTests, CallAfterReserveInject) {
+    ReservedSwapChain reservation = GetWireClient()->ReserveSwapChain(device);
+
+    WGPUSwapChain apiSwapchain = api.GetNewSwapChain();
+    EXPECT_CALL(api, SwapChainReference(apiSwapchain));
+    ASSERT_TRUE(GetWireServer()->InjectSwapChain(apiSwapchain, reservation.id,
+                                                 reservation.generation, reservation.deviceId,
+                                                 reservation.deviceGeneration));
+
+    wgpuSwapChainPresent(reservation.swapchain);
+    EXPECT_CALL(api, SwapChainPresent(apiSwapchain));
+    FlushClient();
+}
+
+// Test that reserve correctly returns different IDs each time.
+TEST_F(WireInjectSwapChainTests, ReserveDifferentIDs) {
+    ReservedSwapChain reservation1 = GetWireClient()->ReserveSwapChain(device);
+    ReservedSwapChain reservation2 = GetWireClient()->ReserveSwapChain(device);
+
+    ASSERT_NE(reservation1.id, reservation2.id);
+    ASSERT_NE(reservation1.swapchain, reservation2.swapchain);
+}
+
+// Test that injecting the same id without a destroy first fails.
+TEST_F(WireInjectSwapChainTests, InjectExistingID) {
+    ReservedSwapChain reservation = GetWireClient()->ReserveSwapChain(device);
+
+    WGPUSwapChain apiSwapchain = api.GetNewSwapChain();
+    EXPECT_CALL(api, SwapChainReference(apiSwapchain));
+    ASSERT_TRUE(GetWireServer()->InjectSwapChain(apiSwapchain, reservation.id,
+                                                 reservation.generation, reservation.deviceId,
+                                                 reservation.deviceGeneration));
+
+    // ID already in use, call fails.
+    ASSERT_FALSE(GetWireServer()->InjectSwapChain(apiSwapchain, reservation.id,
+                                                  reservation.generation, reservation.deviceId,
+                                                  reservation.deviceGeneration));
+}
+
+// Test that the server only borrows the swapchain and does a single reference-release
+TEST_F(WireInjectSwapChainTests, InjectedSwapChainLifetime) {
+    ReservedSwapChain reservation = GetWireClient()->ReserveSwapChain(device);
+
+    // Injecting the swapchain adds a reference
+    WGPUSwapChain apiSwapchain = api.GetNewSwapChain();
+    EXPECT_CALL(api, SwapChainReference(apiSwapchain));
+    ASSERT_TRUE(GetWireServer()->InjectSwapChain(apiSwapchain, reservation.id,
+                                                 reservation.generation, reservation.deviceId,
+                                                 reservation.deviceGeneration));
+
+    // Releasing the swapchain removes a single reference.
+    wgpuSwapChainRelease(reservation.swapchain);
+    EXPECT_CALL(api, SwapChainRelease(apiSwapchain));
+    FlushClient();
+
+    // Deleting the server doesn't release a second reference.
+    DeleteServer();
+    Mock::VerifyAndClearExpectations(&api);
+}
+
+// Test that a swapchain reservation can be reclaimed. This is necessary to
+// avoid leaking ObjectIDs for reservations that are never injected.
+TEST_F(WireInjectSwapChainTests, ReclaimSwapChainReservation) {
+    // Test that doing a reservation and full release is an error.
+    {
         ReservedSwapChain reservation = GetWireClient()->ReserveSwapChain(device);
-
-        WGPUSwapChain apiSwapchain = api.GetNewSwapChain();
-        EXPECT_CALL(api, SwapChainReference(apiSwapchain));
-        ASSERT_TRUE(GetWireServer()->InjectSwapChain(apiSwapchain, reservation.id,
-                                                     reservation.generation, reservation.deviceId,
-                                                     reservation.deviceGeneration));
-
-        wgpuSwapChainPresent(reservation.swapchain);
-        EXPECT_CALL(api, SwapChainPresent(apiSwapchain));
-        FlushClient();
+        wgpuSwapChainRelease(reservation.swapchain);
+        FlushClient(false);
     }
 
-    // Test that reserve correctly returns different IDs each time.
-    TEST_F(WireInjectSwapChainTests, ReserveDifferentIDs) {
+    // Test that doing a reservation and then reclaiming it recycles the ID.
+    {
         ReservedSwapChain reservation1 = GetWireClient()->ReserveSwapChain(device);
+        GetWireClient()->ReclaimSwapChainReservation(reservation1);
+
         ReservedSwapChain reservation2 = GetWireClient()->ReserveSwapChain(device);
 
-        ASSERT_NE(reservation1.id, reservation2.id);
-        ASSERT_NE(reservation1.swapchain, reservation2.swapchain);
-    }
+        // The ID is the same, but the generation is still different.
+        ASSERT_EQ(reservation1.id, reservation2.id);
+        ASSERT_NE(reservation1.generation, reservation2.generation);
 
-    // Test that injecting the same id without a destroy first fails.
-    TEST_F(WireInjectSwapChainTests, InjectExistingID) {
-        ReservedSwapChain reservation = GetWireClient()->ReserveSwapChain(device);
-
-        WGPUSwapChain apiSwapchain = api.GetNewSwapChain();
-        EXPECT_CALL(api, SwapChainReference(apiSwapchain));
-        ASSERT_TRUE(GetWireServer()->InjectSwapChain(apiSwapchain, reservation.id,
-                                                     reservation.generation, reservation.deviceId,
-                                                     reservation.deviceGeneration));
-
-        // ID already in use, call fails.
-        ASSERT_FALSE(GetWireServer()->InjectSwapChain(apiSwapchain, reservation.id,
-                                                      reservation.generation, reservation.deviceId,
-                                                      reservation.deviceGeneration));
-    }
-
-    // Test that the server only borrows the swapchain and does a single reference-release
-    TEST_F(WireInjectSwapChainTests, InjectedSwapChainLifetime) {
-        ReservedSwapChain reservation = GetWireClient()->ReserveSwapChain(device);
-
-        // Injecting the swapchain adds a reference
-        WGPUSwapChain apiSwapchain = api.GetNewSwapChain();
-        EXPECT_CALL(api, SwapChainReference(apiSwapchain));
-        ASSERT_TRUE(GetWireServer()->InjectSwapChain(apiSwapchain, reservation.id,
-                                                     reservation.generation, reservation.deviceId,
-                                                     reservation.deviceGeneration));
-
-        // Releasing the swapchain removes a single reference.
-        wgpuSwapChainRelease(reservation.swapchain);
-        EXPECT_CALL(api, SwapChainRelease(apiSwapchain));
+        // No errors should occur.
         FlushClient();
-
-        // Deleting the server doesn't release a second reference.
-        DeleteServer();
-        Mock::VerifyAndClearExpectations(&api);
     }
-
-    // Test that a swapchain reservation can be reclaimed. This is necessary to
-    // avoid leaking ObjectIDs for reservations that are never injected.
-    TEST_F(WireInjectSwapChainTests, ReclaimSwapChainReservation) {
-        // Test that doing a reservation and full release is an error.
-        {
-            ReservedSwapChain reservation = GetWireClient()->ReserveSwapChain(device);
-            wgpuSwapChainRelease(reservation.swapchain);
-            FlushClient(false);
-        }
-
-        // Test that doing a reservation and then reclaiming it recycles the ID.
-        {
-            ReservedSwapChain reservation1 = GetWireClient()->ReserveSwapChain(device);
-            GetWireClient()->ReclaimSwapChainReservation(reservation1);
-
-            ReservedSwapChain reservation2 = GetWireClient()->ReserveSwapChain(device);
-
-            // The ID is the same, but the generation is still different.
-            ASSERT_EQ(reservation1.id, reservation2.id);
-            ASSERT_NE(reservation1.generation, reservation2.generation);
-
-            // No errors should occur.
-            FlushClient();
-        }
-    }
+}
 
 }  // namespace dawn::wire
diff --git a/src/dawn/tests/unittests/wire/WireInjectTextureTests.cpp b/src/dawn/tests/unittests/wire/WireInjectTextureTests.cpp
index a15fd6c..baabaa5 100644
--- a/src/dawn/tests/unittests/wire/WireInjectTextureTests.cpp
+++ b/src/dawn/tests/unittests/wire/WireInjectTextureTests.cpp
@@ -19,104 +19,99 @@
 
 namespace dawn::wire {
 
-    using testing::Mock;
-    using testing::Return;
+using testing::Mock;
+using testing::Return;
 
-    class WireInjectTextureTests : public WireTest {
-      public:
-        WireInjectTextureTests() {
-        }
-        ~WireInjectTextureTests() override = default;
-    };
+class WireInjectTextureTests : public WireTest {
+  public:
+    WireInjectTextureTests() {}
+    ~WireInjectTextureTests() override = default;
+};
 
-    // Test that reserving and injecting a texture makes calls on the client object forward to the
-    // server object correctly.
-    TEST_F(WireInjectTextureTests, CallAfterReserveInject) {
+// Test that reserving and injecting a texture makes calls on the client object forward to the
+// server object correctly.
+TEST_F(WireInjectTextureTests, CallAfterReserveInject) {
+    ReservedTexture reservation = GetWireClient()->ReserveTexture(device);
+
+    WGPUTexture apiTexture = api.GetNewTexture();
+    EXPECT_CALL(api, TextureReference(apiTexture));
+    ASSERT_TRUE(GetWireServer()->InjectTexture(apiTexture, reservation.id, reservation.generation,
+                                               reservation.deviceId, reservation.deviceGeneration));
+
+    wgpuTextureCreateView(reservation.texture, nullptr);
+    WGPUTextureView apiPlaceholderView = api.GetNewTextureView();
+    EXPECT_CALL(api, TextureCreateView(apiTexture, nullptr)).WillOnce(Return(apiPlaceholderView));
+    FlushClient();
+}
+
+// Test that reserve correctly returns different IDs each time.
+TEST_F(WireInjectTextureTests, ReserveDifferentIDs) {
+    ReservedTexture reservation1 = GetWireClient()->ReserveTexture(device);
+    ReservedTexture reservation2 = GetWireClient()->ReserveTexture(device);
+
+    ASSERT_NE(reservation1.id, reservation2.id);
+    ASSERT_NE(reservation1.texture, reservation2.texture);
+}
+
+// Test that injecting the same id without a destroy first fails.
+TEST_F(WireInjectTextureTests, InjectExistingID) {
+    ReservedTexture reservation = GetWireClient()->ReserveTexture(device);
+
+    WGPUTexture apiTexture = api.GetNewTexture();
+    EXPECT_CALL(api, TextureReference(apiTexture));
+    ASSERT_TRUE(GetWireServer()->InjectTexture(apiTexture, reservation.id, reservation.generation,
+                                               reservation.deviceId, reservation.deviceGeneration));
+
+    // ID already in use, call fails.
+    ASSERT_FALSE(GetWireServer()->InjectTexture(apiTexture, reservation.id, reservation.generation,
+                                                reservation.deviceId,
+                                                reservation.deviceGeneration));
+}
+
+// Test that the server only borrows the texture and does a single reference-release
+TEST_F(WireInjectTextureTests, InjectedTextureLifetime) {
+    ReservedTexture reservation = GetWireClient()->ReserveTexture(device);
+
+    // Injecting the texture adds a reference
+    WGPUTexture apiTexture = api.GetNewTexture();
+    EXPECT_CALL(api, TextureReference(apiTexture));
+    ASSERT_TRUE(GetWireServer()->InjectTexture(apiTexture, reservation.id, reservation.generation,
+                                               reservation.deviceId, reservation.deviceGeneration));
+
+    // Releasing the texture removes a single reference.
+    wgpuTextureRelease(reservation.texture);
+    EXPECT_CALL(api, TextureRelease(apiTexture));
+    FlushClient();
+
+    // Deleting the server doesn't release a second reference.
+    DeleteServer();
+    Mock::VerifyAndClearExpectations(&api);
+}
+
+// Test that a texture reservation can be reclaimed. This is necessary to
+// avoid leaking ObjectIDs for reservations that are never injected.
+TEST_F(WireInjectTextureTests, ReclaimTextureReservation) {
+    // Test that doing a reservation and full release is an error.
+    {
         ReservedTexture reservation = GetWireClient()->ReserveTexture(device);
-
-        WGPUTexture apiTexture = api.GetNewTexture();
-        EXPECT_CALL(api, TextureReference(apiTexture));
-        ASSERT_TRUE(GetWireServer()->InjectTexture(apiTexture, reservation.id,
-                                                   reservation.generation, reservation.deviceId,
-                                                   reservation.deviceGeneration));
-
-        wgpuTextureCreateView(reservation.texture, nullptr);
-        WGPUTextureView apiPlaceholderView = api.GetNewTextureView();
-        EXPECT_CALL(api, TextureCreateView(apiTexture, nullptr))
-            .WillOnce(Return(apiPlaceholderView));
-        FlushClient();
+        wgpuTextureRelease(reservation.texture);
+        FlushClient(false);
     }
 
-    // Test that reserve correctly returns different IDs each time.
-    TEST_F(WireInjectTextureTests, ReserveDifferentIDs) {
+    // Test that doing a reservation and then reclaiming it recycles the ID.
+    {
         ReservedTexture reservation1 = GetWireClient()->ReserveTexture(device);
+        GetWireClient()->ReclaimTextureReservation(reservation1);
+
         ReservedTexture reservation2 = GetWireClient()->ReserveTexture(device);
 
-        ASSERT_NE(reservation1.id, reservation2.id);
-        ASSERT_NE(reservation1.texture, reservation2.texture);
-    }
+        // The ID is the same, but the generation is still different.
+        ASSERT_EQ(reservation1.id, reservation2.id);
+        ASSERT_NE(reservation1.generation, reservation2.generation);
 
-    // Test that injecting the same id without a destroy first fails.
-    TEST_F(WireInjectTextureTests, InjectExistingID) {
-        ReservedTexture reservation = GetWireClient()->ReserveTexture(device);
-
-        WGPUTexture apiTexture = api.GetNewTexture();
-        EXPECT_CALL(api, TextureReference(apiTexture));
-        ASSERT_TRUE(GetWireServer()->InjectTexture(apiTexture, reservation.id,
-                                                   reservation.generation, reservation.deviceId,
-                                                   reservation.deviceGeneration));
-
-        // ID already in use, call fails.
-        ASSERT_FALSE(GetWireServer()->InjectTexture(apiTexture, reservation.id,
-                                                    reservation.generation, reservation.deviceId,
-                                                    reservation.deviceGeneration));
-    }
-
-    // Test that the server only borrows the texture and does a single reference-release
-    TEST_F(WireInjectTextureTests, InjectedTextureLifetime) {
-        ReservedTexture reservation = GetWireClient()->ReserveTexture(device);
-
-        // Injecting the texture adds a reference
-        WGPUTexture apiTexture = api.GetNewTexture();
-        EXPECT_CALL(api, TextureReference(apiTexture));
-        ASSERT_TRUE(GetWireServer()->InjectTexture(apiTexture, reservation.id,
-                                                   reservation.generation, reservation.deviceId,
-                                                   reservation.deviceGeneration));
-
-        // Releasing the texture removes a single reference.
-        wgpuTextureRelease(reservation.texture);
-        EXPECT_CALL(api, TextureRelease(apiTexture));
+        // No errors should occur.
         FlushClient();
-
-        // Deleting the server doesn't release a second reference.
-        DeleteServer();
-        Mock::VerifyAndClearExpectations(&api);
     }
-
-    // Test that a texture reservation can be reclaimed. This is necessary to
-    // avoid leaking ObjectIDs for reservations that are never injected.
-    TEST_F(WireInjectTextureTests, ReclaimTextureReservation) {
-        // Test that doing a reservation and full release is an error.
-        {
-            ReservedTexture reservation = GetWireClient()->ReserveTexture(device);
-            wgpuTextureRelease(reservation.texture);
-            FlushClient(false);
-        }
-
-        // Test that doing a reservation and then reclaiming it recycles the ID.
-        {
-            ReservedTexture reservation1 = GetWireClient()->ReserveTexture(device);
-            GetWireClient()->ReclaimTextureReservation(reservation1);
-
-            ReservedTexture reservation2 = GetWireClient()->ReserveTexture(device);
-
-            // The ID is the same, but the generation is still different.
-            ASSERT_EQ(reservation1.id, reservation2.id);
-            ASSERT_NE(reservation1.generation, reservation2.generation);
-
-            // No errors should occur.
-            FlushClient();
-        }
-    }
+}
 
 }  // namespace dawn::wire
diff --git a/src/dawn/tests/unittests/wire/WireInstanceTests.cpp b/src/dawn/tests/unittests/wire/WireInstanceTests.cpp
index 9ef1e29..16dea28 100644
--- a/src/dawn/tests/unittests/wire/WireInstanceTests.cpp
+++ b/src/dawn/tests/unittests/wire/WireInstanceTests.cpp
@@ -23,273 +23,274 @@
 
 #include "webgpu/webgpu_cpp.h"
 
-namespace dawn::wire { namespace {
+namespace dawn::wire {
+namespace {
 
-    using testing::Invoke;
-    using testing::InvokeWithoutArgs;
-    using testing::MockCallback;
-    using testing::NotNull;
-    using testing::Return;
-    using testing::SetArgPointee;
-    using testing::StrEq;
-    using testing::WithArg;
+using testing::Invoke;
+using testing::InvokeWithoutArgs;
+using testing::MockCallback;
+using testing::NotNull;
+using testing::Return;
+using testing::SetArgPointee;
+using testing::StrEq;
+using testing::WithArg;
 
-    class WireInstanceBasicTest : public WireTest {};
-    class WireInstanceTests : public WireTest {
-      protected:
-        void SetUp() override {
-            WireTest::SetUp();
+class WireInstanceBasicTest : public WireTest {};
+class WireInstanceTests : public WireTest {
+  protected:
+    void SetUp() override {
+        WireTest::SetUp();
 
-            auto reservation = GetWireClient()->ReserveInstance();
-            instance = wgpu::Instance::Acquire(reservation.instance);
-
-            apiInstance = api.GetNewInstance();
-            EXPECT_CALL(api, InstanceReference(apiInstance));
-            EXPECT_TRUE(GetWireServer()->InjectInstance(apiInstance, reservation.id,
-                                                        reservation.generation));
-        }
-
-        void TearDown() override {
-            instance = nullptr;
-            WireTest::TearDown();
-        }
-
-        wgpu::Instance instance;
-        WGPUInstance apiInstance;
-    };
-
-    // Test that an Instance can be reserved and injected into the wire.
-    TEST_F(WireInstanceBasicTest, ReserveAndInject) {
         auto reservation = GetWireClient()->ReserveInstance();
-        wgpu::Instance instance = wgpu::Instance::Acquire(reservation.instance);
+        instance = wgpu::Instance::Acquire(reservation.instance);
 
-        WGPUInstance apiInstance = api.GetNewInstance();
+        apiInstance = api.GetNewInstance();
         EXPECT_CALL(api, InstanceReference(apiInstance));
         EXPECT_TRUE(
             GetWireServer()->InjectInstance(apiInstance, reservation.id, reservation.generation));
+    }
 
+    void TearDown() override {
         instance = nullptr;
+        WireTest::TearDown();
+    }
 
-        EXPECT_CALL(api, InstanceRelease(apiInstance));
+    wgpu::Instance instance;
+    WGPUInstance apiInstance;
+};
+
+// Test that an Instance can be reserved and injected into the wire.
+TEST_F(WireInstanceBasicTest, ReserveAndInject) {
+    auto reservation = GetWireClient()->ReserveInstance();
+    wgpu::Instance instance = wgpu::Instance::Acquire(reservation.instance);
+
+    WGPUInstance apiInstance = api.GetNewInstance();
+    EXPECT_CALL(api, InstanceReference(apiInstance));
+    EXPECT_TRUE(
+        GetWireServer()->InjectInstance(apiInstance, reservation.id, reservation.generation));
+
+    instance = nullptr;
+
+    EXPECT_CALL(api, InstanceRelease(apiInstance));
+    FlushClient();
+}
+
+// Test that RequestAdapterOptions are passed from the client to the server.
+TEST_F(WireInstanceTests, RequestAdapterPassesOptions) {
+    MockCallback<WGPURequestAdapterCallback> cb;
+    auto* userdata = cb.MakeUserdata(this);
+
+    for (wgpu::PowerPreference powerPreference :
+         {wgpu::PowerPreference::LowPower, wgpu::PowerPreference::HighPerformance}) {
+        wgpu::RequestAdapterOptions options = {};
+        options.powerPreference = powerPreference;
+
+        instance.RequestAdapter(&options, cb.Callback(), userdata);
+
+        EXPECT_CALL(api, OnInstanceRequestAdapter(apiInstance, NotNull(), NotNull(), NotNull()))
+            .WillOnce(WithArg<1>(Invoke([&](const WGPURequestAdapterOptions* apiOptions) {
+                EXPECT_EQ(apiOptions->powerPreference,
+                          static_cast<WGPUPowerPreference>(options.powerPreference));
+                EXPECT_EQ(apiOptions->forceFallbackAdapter, options.forceFallbackAdapter);
+            })));
         FlushClient();
     }
 
-    // Test that RequestAdapterOptions are passed from the client to the server.
-    TEST_F(WireInstanceTests, RequestAdapterPassesOptions) {
-        MockCallback<WGPURequestAdapterCallback> cb;
-        auto* userdata = cb.MakeUserdata(this);
+    // Delete the instance now, or it'll call the mock callback after it's deleted.
+    instance = nullptr;
+}
 
-        for (wgpu::PowerPreference powerPreference :
-             {wgpu::PowerPreference::LowPower, wgpu::PowerPreference::HighPerformance}) {
-            wgpu::RequestAdapterOptions options = {};
-            options.powerPreference = powerPreference;
+// Test that RequestAdapter forwards the adapter information to the client.
+TEST_F(WireInstanceTests, RequestAdapterSuccess) {
+    wgpu::RequestAdapterOptions options = {};
+    MockCallback<WGPURequestAdapterCallback> cb;
+    auto* userdata = cb.MakeUserdata(this);
+    instance.RequestAdapter(&options, cb.Callback(), userdata);
 
-            instance.RequestAdapter(&options, cb.Callback(), userdata);
+    wgpu::AdapterProperties fakeProperties = {};
+    fakeProperties.vendorID = 0x134;
+    fakeProperties.deviceID = 0x918;
+    fakeProperties.name = "fake adapter";
+    fakeProperties.driverDescription = "hello world";
+    fakeProperties.backendType = wgpu::BackendType::D3D12;
+    fakeProperties.adapterType = wgpu::AdapterType::IntegratedGPU;
 
-            EXPECT_CALL(api, OnInstanceRequestAdapter(apiInstance, NotNull(), NotNull(), NotNull()))
-                .WillOnce(WithArg<1>(Invoke([&](const WGPURequestAdapterOptions* apiOptions) {
-                    EXPECT_EQ(apiOptions->powerPreference,
-                              static_cast<WGPUPowerPreference>(options.powerPreference));
-                    EXPECT_EQ(apiOptions->forceFallbackAdapter, options.forceFallbackAdapter);
+    wgpu::SupportedLimits fakeLimits = {};
+    fakeLimits.limits.maxTextureDimension1D = 433;
+    fakeLimits.limits.maxVertexAttributes = 1243;
+
+    std::initializer_list<wgpu::FeatureName> fakeFeatures = {
+        wgpu::FeatureName::Depth32FloatStencil8,
+        wgpu::FeatureName::TextureCompressionBC,
+    };
+
+    // Expect the server to receive the message. Then, mock a fake reply.
+    WGPUAdapter apiAdapter = api.GetNewAdapter();
+    EXPECT_CALL(api, OnInstanceRequestAdapter(apiInstance, NotNull(), NotNull(), NotNull()))
+        .WillOnce(InvokeWithoutArgs([&]() {
+            EXPECT_CALL(api, AdapterGetProperties(apiAdapter, NotNull()))
+                .WillOnce(
+                    SetArgPointee<1>(*reinterpret_cast<WGPUAdapterProperties*>(&fakeProperties)));
+
+            EXPECT_CALL(api, AdapterGetLimits(apiAdapter, NotNull()))
+                .WillOnce(WithArg<1>(Invoke([&](WGPUSupportedLimits* limits) {
+                    *reinterpret_cast<wgpu::SupportedLimits*>(limits) = fakeLimits;
+                    return true;
                 })));
-            FlushClient();
-        }
 
-        // Delete the instance now, or it'll call the mock callback after it's deleted.
-        instance = nullptr;
-    }
+            EXPECT_CALL(api, AdapterEnumerateFeatures(apiAdapter, nullptr))
+                .WillOnce(Return(fakeFeatures.size()));
 
-    // Test that RequestAdapter forwards the adapter information to the client.
-    TEST_F(WireInstanceTests, RequestAdapterSuccess) {
-        wgpu::RequestAdapterOptions options = {};
-        MockCallback<WGPURequestAdapterCallback> cb;
-        auto* userdata = cb.MakeUserdata(this);
-        instance.RequestAdapter(&options, cb.Callback(), userdata);
+            EXPECT_CALL(api, AdapterEnumerateFeatures(apiAdapter, NotNull()))
+                .WillOnce(WithArg<1>(Invoke([&](WGPUFeatureName* features) {
+                    for (wgpu::FeatureName feature : fakeFeatures) {
+                        *(features++) = static_cast<WGPUFeatureName>(feature);
+                    }
+                    return fakeFeatures.size();
+                })));
+            api.CallInstanceRequestAdapterCallback(apiInstance, WGPURequestAdapterStatus_Success,
+                                                   apiAdapter, nullptr);
+        }));
+    FlushClient();
 
-        wgpu::AdapterProperties fakeProperties = {};
-        fakeProperties.vendorID = 0x134;
-        fakeProperties.deviceID = 0x918;
-        fakeProperties.name = "fake adapter";
-        fakeProperties.driverDescription = "hello world";
-        fakeProperties.backendType = wgpu::BackendType::D3D12;
-        fakeProperties.adapterType = wgpu::AdapterType::IntegratedGPU;
+    // Expect the callback in the client and all the adapter information to match.
+    EXPECT_CALL(cb, Call(WGPURequestAdapterStatus_Success, NotNull(), nullptr, this))
+        .WillOnce(WithArg<1>(Invoke([&](WGPUAdapter cAdapter) {
+            wgpu::Adapter adapter = wgpu::Adapter::Acquire(cAdapter);
 
-        wgpu::SupportedLimits fakeLimits = {};
-        fakeLimits.limits.maxTextureDimension1D = 433;
-        fakeLimits.limits.maxVertexAttributes = 1243;
+            wgpu::AdapterProperties properties;
+            adapter.GetProperties(&properties);
+            EXPECT_EQ(properties.vendorID, fakeProperties.vendorID);
+            EXPECT_EQ(properties.deviceID, fakeProperties.deviceID);
+            EXPECT_STREQ(properties.name, fakeProperties.name);
+            EXPECT_STREQ(properties.driverDescription, fakeProperties.driverDescription);
+            EXPECT_EQ(properties.backendType, fakeProperties.backendType);
+            EXPECT_EQ(properties.adapterType, fakeProperties.adapterType);
 
-        std::initializer_list<wgpu::FeatureName> fakeFeatures = {
-            wgpu::FeatureName::Depth32FloatStencil8,
-            wgpu::FeatureName::TextureCompressionBC,
-        };
+            wgpu::SupportedLimits limits;
+            EXPECT_TRUE(adapter.GetLimits(&limits));
+            EXPECT_EQ(limits.limits.maxTextureDimension1D, fakeLimits.limits.maxTextureDimension1D);
+            EXPECT_EQ(limits.limits.maxVertexAttributes, fakeLimits.limits.maxVertexAttributes);
 
-        // Expect the server to receive the message. Then, mock a fake reply.
-        WGPUAdapter apiAdapter = api.GetNewAdapter();
-        EXPECT_CALL(api, OnInstanceRequestAdapter(apiInstance, NotNull(), NotNull(), NotNull()))
-            .WillOnce(InvokeWithoutArgs([&]() {
-                EXPECT_CALL(api, AdapterGetProperties(apiAdapter, NotNull()))
-                    .WillOnce(SetArgPointee<1>(
-                        *reinterpret_cast<WGPUAdapterProperties*>(&fakeProperties)));
+            std::vector<wgpu::FeatureName> features;
+            features.resize(adapter.EnumerateFeatures(nullptr));
+            ASSERT_EQ(features.size(), fakeFeatures.size());
+            EXPECT_EQ(adapter.EnumerateFeatures(&features[0]), features.size());
 
-                EXPECT_CALL(api, AdapterGetLimits(apiAdapter, NotNull()))
-                    .WillOnce(WithArg<1>(Invoke([&](WGPUSupportedLimits* limits) {
-                        *reinterpret_cast<wgpu::SupportedLimits*>(limits) = fakeLimits;
-                        return true;
-                    })));
+            std::unordered_set<wgpu::FeatureName> featureSet(fakeFeatures);
+            for (wgpu::FeatureName feature : features) {
+                EXPECT_EQ(featureSet.erase(feature), 1u);
+            }
+        })));
+    FlushServer();
+}
 
-                EXPECT_CALL(api, AdapterEnumerateFeatures(apiAdapter, nullptr))
-                    .WillOnce(Return(fakeFeatures.size()));
+// Test that features returned by the implementation that aren't supported
+// in the wire are not exposed.
+TEST_F(WireInstanceTests, RequestAdapterWireLacksFeatureSupport) {
+    wgpu::RequestAdapterOptions options = {};
+    MockCallback<WGPURequestAdapterCallback> cb;
+    auto* userdata = cb.MakeUserdata(this);
+    instance.RequestAdapter(&options, cb.Callback(), userdata);
 
-                EXPECT_CALL(api, AdapterEnumerateFeatures(apiAdapter, NotNull()))
-                    .WillOnce(WithArg<1>(Invoke([&](WGPUFeatureName* features) {
-                        for (wgpu::FeatureName feature : fakeFeatures) {
-                            *(features++) = static_cast<WGPUFeatureName>(feature);
-                        }
-                        return fakeFeatures.size();
-                    })));
-                api.CallInstanceRequestAdapterCallback(
-                    apiInstance, WGPURequestAdapterStatus_Success, apiAdapter, nullptr);
-            }));
-        FlushClient();
+    std::initializer_list<wgpu::FeatureName> fakeFeatures = {
+        wgpu::FeatureName::Depth24UnormStencil8,
+        // Some value that is not a valid feature
+        static_cast<wgpu::FeatureName>(-2),
+    };
 
-        // Expect the callback in the client and all the adapter information to match.
-        EXPECT_CALL(cb, Call(WGPURequestAdapterStatus_Success, NotNull(), nullptr, this))
-            .WillOnce(WithArg<1>(Invoke([&](WGPUAdapter cAdapter) {
-                wgpu::Adapter adapter = wgpu::Adapter::Acquire(cAdapter);
+    // Expect the server to receive the message. Then, mock a fake reply.
+    WGPUAdapter apiAdapter = api.GetNewAdapter();
+    EXPECT_CALL(api, OnInstanceRequestAdapter(apiInstance, NotNull(), NotNull(), NotNull()))
+        .WillOnce(InvokeWithoutArgs([&]() {
+            EXPECT_CALL(api, AdapterGetProperties(apiAdapter, NotNull()))
+                .WillOnce(WithArg<1>(Invoke([&](WGPUAdapterProperties* properties) {
+                    *properties = {};
+                    properties->name = "";
+                    properties->driverDescription = "";
+                })));
 
-                wgpu::AdapterProperties properties;
-                adapter.GetProperties(&properties);
-                EXPECT_EQ(properties.vendorID, fakeProperties.vendorID);
-                EXPECT_EQ(properties.deviceID, fakeProperties.deviceID);
-                EXPECT_STREQ(properties.name, fakeProperties.name);
-                EXPECT_STREQ(properties.driverDescription, fakeProperties.driverDescription);
-                EXPECT_EQ(properties.backendType, fakeProperties.backendType);
-                EXPECT_EQ(properties.adapterType, fakeProperties.adapterType);
+            EXPECT_CALL(api, AdapterGetLimits(apiAdapter, NotNull()))
+                .WillOnce(WithArg<1>(Invoke([&](WGPUSupportedLimits* limits) {
+                    *limits = {};
+                    return true;
+                })));
 
-                wgpu::SupportedLimits limits;
-                EXPECT_TRUE(adapter.GetLimits(&limits));
-                EXPECT_EQ(limits.limits.maxTextureDimension1D,
-                          fakeLimits.limits.maxTextureDimension1D);
-                EXPECT_EQ(limits.limits.maxVertexAttributes, fakeLimits.limits.maxVertexAttributes);
+            EXPECT_CALL(api, AdapterEnumerateFeatures(apiAdapter, nullptr))
+                .WillOnce(Return(fakeFeatures.size()));
 
-                std::vector<wgpu::FeatureName> features;
-                features.resize(adapter.EnumerateFeatures(nullptr));
-                ASSERT_EQ(features.size(), fakeFeatures.size());
-                EXPECT_EQ(adapter.EnumerateFeatures(&features[0]), features.size());
+            EXPECT_CALL(api, AdapterEnumerateFeatures(apiAdapter, NotNull()))
+                .WillOnce(WithArg<1>(Invoke([&](WGPUFeatureName* features) {
+                    for (wgpu::FeatureName feature : fakeFeatures) {
+                        *(features++) = static_cast<WGPUFeatureName>(feature);
+                    }
+                    return fakeFeatures.size();
+                })));
+            api.CallInstanceRequestAdapterCallback(apiInstance, WGPURequestAdapterStatus_Success,
+                                                   apiAdapter, nullptr);
+        }));
+    FlushClient();
 
-                std::unordered_set<wgpu::FeatureName> featureSet(fakeFeatures);
-                for (wgpu::FeatureName feature : features) {
-                    EXPECT_EQ(featureSet.erase(feature), 1u);
-                }
-            })));
-        FlushServer();
-    }
+    // Expect the callback in the client and all the adapter information to match.
+    EXPECT_CALL(cb, Call(WGPURequestAdapterStatus_Success, NotNull(), nullptr, this))
+        .WillOnce(WithArg<1>(Invoke([&](WGPUAdapter cAdapter) {
+            wgpu::Adapter adapter = wgpu::Adapter::Acquire(cAdapter);
 
-    // Test that features returned by the implementation that aren't supported
-    // in the wire are not exposed.
-    TEST_F(WireInstanceTests, RequestAdapterWireLacksFeatureSupport) {
-        wgpu::RequestAdapterOptions options = {};
-        MockCallback<WGPURequestAdapterCallback> cb;
-        auto* userdata = cb.MakeUserdata(this);
-        instance.RequestAdapter(&options, cb.Callback(), userdata);
+            wgpu::FeatureName feature;
+            ASSERT_EQ(adapter.EnumerateFeatures(nullptr), 1u);
+            adapter.EnumerateFeatures(&feature);
 
-        std::initializer_list<wgpu::FeatureName> fakeFeatures = {
-            wgpu::FeatureName::Depth24UnormStencil8,
-            // Some value that is not a valid feature
-            static_cast<wgpu::FeatureName>(-2),
-        };
+            EXPECT_EQ(feature, wgpu::FeatureName::Depth24UnormStencil8);
+        })));
+    FlushServer();
+}
 
-        // Expect the server to receive the message. Then, mock a fake reply.
-        WGPUAdapter apiAdapter = api.GetNewAdapter();
-        EXPECT_CALL(api, OnInstanceRequestAdapter(apiInstance, NotNull(), NotNull(), NotNull()))
-            .WillOnce(InvokeWithoutArgs([&]() {
-                EXPECT_CALL(api, AdapterGetProperties(apiAdapter, NotNull()))
-                    .WillOnce(WithArg<1>(Invoke([&](WGPUAdapterProperties* properties) {
-                        *properties = {};
-                        properties->name = "";
-                        properties->driverDescription = "";
-                    })));
+// Test that RequestAdapter errors forward to the client.
+TEST_F(WireInstanceTests, RequestAdapterError) {
+    wgpu::RequestAdapterOptions options = {};
+    MockCallback<WGPURequestAdapterCallback> cb;
+    auto* userdata = cb.MakeUserdata(this);
+    instance.RequestAdapter(&options, cb.Callback(), userdata);
 
-                EXPECT_CALL(api, AdapterGetLimits(apiAdapter, NotNull()))
-                    .WillOnce(WithArg<1>(Invoke([&](WGPUSupportedLimits* limits) {
-                        *limits = {};
-                        return true;
-                    })));
+    // Expect the server to receive the message. Then, mock an error.
+    EXPECT_CALL(api, OnInstanceRequestAdapter(apiInstance, NotNull(), NotNull(), NotNull()))
+        .WillOnce(InvokeWithoutArgs([&]() {
+            api.CallInstanceRequestAdapterCallback(apiInstance, WGPURequestAdapterStatus_Error,
+                                                   nullptr, "Some error");
+        }));
+    FlushClient();
 
-                EXPECT_CALL(api, AdapterEnumerateFeatures(apiAdapter, nullptr))
-                    .WillOnce(Return(fakeFeatures.size()));
+    // Expect the callback in the client.
+    EXPECT_CALL(cb, Call(WGPURequestAdapterStatus_Error, nullptr, StrEq("Some error"), this))
+        .Times(1);
+    FlushServer();
+}
 
-                EXPECT_CALL(api, AdapterEnumerateFeatures(apiAdapter, NotNull()))
-                    .WillOnce(WithArg<1>(Invoke([&](WGPUFeatureName* features) {
-                        for (wgpu::FeatureName feature : fakeFeatures) {
-                            *(features++) = static_cast<WGPUFeatureName>(feature);
-                        }
-                        return fakeFeatures.size();
-                    })));
-                api.CallInstanceRequestAdapterCallback(
-                    apiInstance, WGPURequestAdapterStatus_Success, apiAdapter, nullptr);
-            }));
-        FlushClient();
+// Test that RequestAdapter receives unknown status if the instance is deleted
+// before the callback happens.
+TEST_F(WireInstanceTests, RequestAdapterInstanceDestroyedBeforeCallback) {
+    wgpu::RequestAdapterOptions options = {};
+    MockCallback<WGPURequestAdapterCallback> cb;
+    auto* userdata = cb.MakeUserdata(this);
+    instance.RequestAdapter(&options, cb.Callback(), userdata);
 
-        // Expect the callback in the client and all the adapter information to match.
-        EXPECT_CALL(cb, Call(WGPURequestAdapterStatus_Success, NotNull(), nullptr, this))
-            .WillOnce(WithArg<1>(Invoke([&](WGPUAdapter cAdapter) {
-                wgpu::Adapter adapter = wgpu::Adapter::Acquire(cAdapter);
+    EXPECT_CALL(cb, Call(WGPURequestAdapterStatus_Unknown, nullptr, NotNull(), this)).Times(1);
+    instance = nullptr;
+}
 
-                wgpu::FeatureName feature;
-                ASSERT_EQ(adapter.EnumerateFeatures(nullptr), 1u);
-                adapter.EnumerateFeatures(&feature);
+// Test that RequestAdapter receives unknown status if the wire is disconnected
+// before the callback happens.
+TEST_F(WireInstanceTests, RequestAdapterWireDisconnectBeforeCallback) {
+    wgpu::RequestAdapterOptions options = {};
+    MockCallback<WGPURequestAdapterCallback> cb;
+    auto* userdata = cb.MakeUserdata(this);
+    instance.RequestAdapter(&options, cb.Callback(), userdata);
 
-                EXPECT_EQ(feature, wgpu::FeatureName::Depth24UnormStencil8);
-            })));
-        FlushServer();
-    }
+    EXPECT_CALL(cb, Call(WGPURequestAdapterStatus_Unknown, nullptr, NotNull(), this)).Times(1);
+    GetWireClient()->Disconnect();
+}
 
-    // Test that RequestAdapter errors forward to the client.
-    TEST_F(WireInstanceTests, RequestAdapterError) {
-        wgpu::RequestAdapterOptions options = {};
-        MockCallback<WGPURequestAdapterCallback> cb;
-        auto* userdata = cb.MakeUserdata(this);
-        instance.RequestAdapter(&options, cb.Callback(), userdata);
-
-        // Expect the server to receive the message. Then, mock an error.
-        EXPECT_CALL(api, OnInstanceRequestAdapter(apiInstance, NotNull(), NotNull(), NotNull()))
-            .WillOnce(InvokeWithoutArgs([&]() {
-                api.CallInstanceRequestAdapterCallback(apiInstance, WGPURequestAdapterStatus_Error,
-                                                       nullptr, "Some error");
-            }));
-        FlushClient();
-
-        // Expect the callback in the client.
-        EXPECT_CALL(cb, Call(WGPURequestAdapterStatus_Error, nullptr, StrEq("Some error"), this))
-            .Times(1);
-        FlushServer();
-    }
-
-    // Test that RequestAdapter receives unknown status if the instance is deleted
-    // before the callback happens.
-    TEST_F(WireInstanceTests, RequestAdapterInstanceDestroyedBeforeCallback) {
-        wgpu::RequestAdapterOptions options = {};
-        MockCallback<WGPURequestAdapterCallback> cb;
-        auto* userdata = cb.MakeUserdata(this);
-        instance.RequestAdapter(&options, cb.Callback(), userdata);
-
-        EXPECT_CALL(cb, Call(WGPURequestAdapterStatus_Unknown, nullptr, NotNull(), this)).Times(1);
-        instance = nullptr;
-    }
-
-    // Test that RequestAdapter receives unknown status if the wire is disconnected
-    // before the callback happens.
-    TEST_F(WireInstanceTests, RequestAdapterWireDisconnectBeforeCallback) {
-        wgpu::RequestAdapterOptions options = {};
-        MockCallback<WGPURequestAdapterCallback> cb;
-        auto* userdata = cb.MakeUserdata(this);
-        instance.RequestAdapter(&options, cb.Callback(), userdata);
-
-        EXPECT_CALL(cb, Call(WGPURequestAdapterStatus_Unknown, nullptr, NotNull(), this)).Times(1);
-        GetWireClient()->Disconnect();
-    }
-
-    // TODO(https://crbug.com/dawn/1381) Remove when namespaces are not indented.
-    // NOLINTNEXTLINE(readability/namespace)
-}}  // namespace dawn::wire::
+// TODO(https://crbug.com/dawn/1381) Remove when namespaces are not indented.
+// NOLINTNEXTLINE(readability/namespace)
+}  // namespace
+}  // namespace dawn::wire
diff --git a/src/dawn/tests/unittests/wire/WireMemoryTransferServiceTests.cpp b/src/dawn/tests/unittests/wire/WireMemoryTransferServiceTests.cpp
index 484c867..b2ce580 100644
--- a/src/dawn/tests/unittests/wire/WireMemoryTransferServiceTests.cpp
+++ b/src/dawn/tests/unittests/wire/WireMemoryTransferServiceTests.cpp
@@ -21,1061 +21,1046 @@
 
 namespace dawn::wire {
 
-    using testing::_;
-    using testing::Eq;
-    using testing::InvokeWithoutArgs;
-    using testing::Mock;
-    using testing::Pointee;
-    using testing::Return;
-    using testing::StrictMock;
-    using testing::WithArg;
+using testing::_;
+using testing::Eq;
+using testing::InvokeWithoutArgs;
+using testing::Mock;
+using testing::Pointee;
+using testing::Return;
+using testing::StrictMock;
+using testing::WithArg;
 
-    namespace {
+namespace {
 
-        // Mock class to add expectations on the wire calling callbacks
-        class MockBufferMapCallback {
-          public:
-            MOCK_METHOD(void, Call, (WGPUBufferMapAsyncStatus status, void* userdata));
-        };
+// Mock class to add expectations on the wire calling callbacks
+class MockBufferMapCallback {
+  public:
+    MOCK_METHOD(void, Call, (WGPUBufferMapAsyncStatus status, void* userdata));
+};
 
-        std::unique_ptr<StrictMock<MockBufferMapCallback>> mockBufferMapCallback;
-        void ToMockBufferMapCallback(WGPUBufferMapAsyncStatus status, void* userdata) {
-            mockBufferMapCallback->Call(status, userdata);
-        }
+std::unique_ptr<StrictMock<MockBufferMapCallback>> mockBufferMapCallback;
+void ToMockBufferMapCallback(WGPUBufferMapAsyncStatus status, void* userdata) {
+    mockBufferMapCallback->Call(status, userdata);
+}
 
-    }  // anonymous namespace
+}  // anonymous namespace
 
-    // WireMemoryTransferServiceTests test the MemoryTransferService with buffer mapping.
-    // They test the basic success and error cases for buffer mapping, and they test
-    // mocked failures of each fallible MemoryTransferService method that an embedder
-    // could implement.
-    // The test harness defines multiple helpers for expecting operations on Read/Write handles
-    // and for mocking failures. The helpers are designed such that for a given run of a test,
-    // a Serialization expection has a corresponding Deserialization expectation for which the
-    // serialized data must match.
-    // There are tests which check for Success for every mapping operation which mock an entire
-    // mapping operation from map to unmap, and add all MemoryTransferService expectations. Tests
-    // which check for errors perform the same mapping operations but insert mocked failures for
-    // various mapping or MemoryTransferService operations.
-    class WireMemoryTransferServiceTests : public WireTest {
-      public:
-        WireMemoryTransferServiceTests() {
-        }
-        ~WireMemoryTransferServiceTests() override = default;
+// WireMemoryTransferServiceTests test the MemoryTransferService with buffer mapping.
+// They test the basic success and error cases for buffer mapping, and they test
+// mocked failures of each fallible MemoryTransferService method that an embedder
+// could implement.
+// The test harness defines multiple helpers for expecting operations on Read/Write handles
+// and for mocking failures. The helpers are designed such that for a given run of a test,
+// a Serialization expection has a corresponding Deserialization expectation for which the
+// serialized data must match.
+// There are tests which check for Success for every mapping operation which mock an entire
+// mapping operation from map to unmap, and add all MemoryTransferService expectations. Tests
+// which check for errors perform the same mapping operations but insert mocked failures for
+// various mapping or MemoryTransferService operations.
+class WireMemoryTransferServiceTests : public WireTest {
+  public:
+    WireMemoryTransferServiceTests() {}
+    ~WireMemoryTransferServiceTests() override = default;
 
-        client::MemoryTransferService* GetClientMemoryTransferService() override {
-            return &clientMemoryTransferService;
-        }
-
-        server::MemoryTransferService* GetServerMemoryTransferService() override {
-            return &serverMemoryTransferService;
-        }
-
-        void SetUp() override {
-            WireTest::SetUp();
-
-            mockBufferMapCallback = std::make_unique<StrictMock<MockBufferMapCallback>>();
-
-            // TODO(enga): Make this thread-safe.
-            mBufferContent++;
-            mMappedBufferContent = 0;
-            mUpdatedBufferContent++;
-            mSerializeCreateInfo++;
-            mReadHandleSerializeDataInfo++;
-            mWriteHandleSerializeDataInfo++;
-        }
-
-        void TearDown() override {
-            WireTest::TearDown();
-
-            // Delete mock so that expectations are checked
-            mockBufferMapCallback = nullptr;
-        }
-
-        void FlushClient(bool success = true) {
-            WireTest::FlushClient(success);
-            Mock::VerifyAndClearExpectations(&serverMemoryTransferService);
-        }
-
-        void FlushServer(bool success = true) {
-            WireTest::FlushServer(success);
-
-            Mock::VerifyAndClearExpectations(&mockBufferMapCallback);
-            Mock::VerifyAndClearExpectations(&clientMemoryTransferService);
-        }
-
-      protected:
-        using ClientReadHandle = client::MockMemoryTransferService::MockReadHandle;
-        using ServerReadHandle = server::MockMemoryTransferService::MockReadHandle;
-        using ClientWriteHandle = client::MockMemoryTransferService::MockWriteHandle;
-        using ServerWriteHandle = server::MockMemoryTransferService::MockWriteHandle;
-
-        std::pair<WGPUBuffer, WGPUBuffer> CreateBuffer(
-            WGPUBufferUsage usage = WGPUBufferUsage_None) {
-            WGPUBufferDescriptor descriptor = {};
-            descriptor.size = kBufferSize;
-            descriptor.usage = usage;
-
-            WGPUBuffer apiBuffer = api.GetNewBuffer();
-            WGPUBuffer buffer = wgpuDeviceCreateBuffer(device, &descriptor);
-
-            EXPECT_CALL(api, DeviceCreateBuffer(apiDevice, _))
-                .WillOnce(Return(apiBuffer))
-                .RetiresOnSaturation();
-
-            return std::make_pair(apiBuffer, buffer);
-        }
-
-        std::pair<WGPUBuffer, WGPUBuffer> CreateBufferMapped(
-            WGPUBufferUsage usage = WGPUBufferUsage_None) {
-            WGPUBufferDescriptor descriptor = {};
-            descriptor.size = sizeof(mBufferContent);
-            descriptor.mappedAtCreation = true;
-            descriptor.usage = usage;
-
-            WGPUBuffer apiBuffer = api.GetNewBuffer();
-
-            WGPUBuffer buffer = wgpuDeviceCreateBuffer(device, &descriptor);
-
-            EXPECT_CALL(api, DeviceCreateBuffer(apiDevice, _)).WillOnce(Return(apiBuffer));
-            EXPECT_CALL(api, BufferGetMappedRange(apiBuffer, 0, sizeof(mBufferContent)))
-                .WillOnce(Return(&mMappedBufferContent));
-
-            return std::make_pair(apiBuffer, buffer);
-        }
-
-        ClientReadHandle* ExpectReadHandleCreation() {
-            // Create the handle first so we can use it in later expectations.
-            ClientReadHandle* handle = clientMemoryTransferService.NewReadHandle();
-
-            EXPECT_CALL(clientMemoryTransferService, OnCreateReadHandle(sizeof(mBufferContent)))
-                .WillOnce(InvokeWithoutArgs([=]() { return handle; }));
-
-            return handle;
-        }
-
-        void MockReadHandleCreationFailure() {
-            EXPECT_CALL(clientMemoryTransferService, OnCreateReadHandle(sizeof(mBufferContent)))
-                .WillOnce(InvokeWithoutArgs([=]() { return nullptr; }));
-        }
-
-        void ExpectReadHandleSerialization(ClientReadHandle* handle) {
-            EXPECT_CALL(clientMemoryTransferService, OnReadHandleSerializeCreateSize(handle))
-                .WillOnce(InvokeWithoutArgs([&]() { return sizeof(mSerializeCreateInfo); }));
-            EXPECT_CALL(clientMemoryTransferService, OnReadHandleSerializeCreate(handle, _))
-                .WillOnce(WithArg<1>([&](void* serializePointer) {
-                    memcpy(serializePointer, &mSerializeCreateInfo, sizeof(mSerializeCreateInfo));
-                    return sizeof(mSerializeCreateInfo);
-                }));
-        }
-
-        ServerReadHandle* ExpectServerReadHandleDeserialize() {
-            // Create the handle first so we can use it in later expectations.
-            ServerReadHandle* handle = serverMemoryTransferService.NewReadHandle();
-
-            EXPECT_CALL(serverMemoryTransferService,
-                        OnDeserializeReadHandle(Pointee(Eq(mSerializeCreateInfo)),
-                                                sizeof(mSerializeCreateInfo), _))
-                .WillOnce(WithArg<2>([=](server::MemoryTransferService::ReadHandle** readHandle) {
-                    *readHandle = handle;
-                    return true;
-                }));
-
-            return handle;
-        }
-
-        void MockServerReadHandleDeserializeFailure() {
-            EXPECT_CALL(serverMemoryTransferService,
-                        OnDeserializeReadHandle(Pointee(Eq(mSerializeCreateInfo)),
-                                                sizeof(mSerializeCreateInfo), _))
-                .WillOnce(InvokeWithoutArgs([&]() { return false; }));
-        }
-
-        void ExpectServerReadHandleSerializeDataUpdate(ServerReadHandle* handle) {
-            EXPECT_CALL(serverMemoryTransferService,
-                        OnReadHandleSizeOfSerializeDataUpdate(handle, _, _))
-                .WillOnce(
-                    InvokeWithoutArgs([&]() { return sizeof(mReadHandleSerializeDataInfo); }));
-            EXPECT_CALL(serverMemoryTransferService,
-                        OnReadHandleSerializeDataUpdate(handle, _, _, _, _))
-                .WillOnce(WithArg<4>([&](void* serializePointer) {
-                    memcpy(serializePointer, &mReadHandleSerializeDataInfo,
-                           sizeof(mReadHandleSerializeDataInfo));
-                    return sizeof(mReadHandleSerializeDataInfo);
-                }));
-        }
-
-        void ExpectClientReadHandleDeserializeDataUpdate(ClientReadHandle* handle,
-                                                         uint32_t* mappedData) {
-            EXPECT_CALL(
-                clientMemoryTransferService,
-                OnReadHandleDeserializeDataUpdate(handle, Pointee(Eq(mReadHandleSerializeDataInfo)),
-                                                  sizeof(mReadHandleSerializeDataInfo), _, _))
-                .WillOnce(Return(true));
-        }
-
-        void MockClientReadHandleDeserializeDataUpdateFailure(ClientReadHandle* handle) {
-            EXPECT_CALL(
-                clientMemoryTransferService,
-                OnReadHandleDeserializeDataUpdate(handle, Pointee(Eq(mReadHandleSerializeDataInfo)),
-                                                  sizeof(mReadHandleSerializeDataInfo), _, _))
-                .WillOnce(Return(false));
-        }
-
-        ClientWriteHandle* ExpectWriteHandleCreation(bool mappedAtCreation) {
-            // Create the handle first so we can use it in later expectations.
-            ClientWriteHandle* handle = clientMemoryTransferService.NewWriteHandle();
-
-            EXPECT_CALL(clientMemoryTransferService, OnCreateWriteHandle(sizeof(mBufferContent)))
-                .WillOnce(InvokeWithoutArgs([=]() { return handle; }));
-            if (mappedAtCreation) {
-                EXPECT_CALL(clientMemoryTransferService, OnWriteHandleGetData(handle))
-                    .WillOnce(Return(&mBufferContent));
-            }
-
-            return handle;
-        }
-
-        void MockWriteHandleCreationFailure() {
-            EXPECT_CALL(clientMemoryTransferService, OnCreateWriteHandle(sizeof(mBufferContent)))
-                .WillOnce(InvokeWithoutArgs([=]() { return nullptr; }));
-        }
-
-        void ExpectWriteHandleSerialization(ClientWriteHandle* handle) {
-            EXPECT_CALL(clientMemoryTransferService, OnWriteHandleSerializeCreateSize(handle))
-                .WillOnce(InvokeWithoutArgs([&]() { return sizeof(mSerializeCreateInfo); }));
-            EXPECT_CALL(clientMemoryTransferService, OnWriteHandleSerializeCreate(handle, _))
-                .WillOnce(WithArg<1>([&](void* serializePointer) {
-                    memcpy(serializePointer, &mSerializeCreateInfo, sizeof(mSerializeCreateInfo));
-                    return sizeof(mSerializeCreateInfo);
-                }));
-        }
-
-        ServerWriteHandle* ExpectServerWriteHandleDeserialization() {
-            // Create the handle first so it can be used in later expectations.
-            ServerWriteHandle* handle = serverMemoryTransferService.NewWriteHandle();
-
-            EXPECT_CALL(serverMemoryTransferService,
-                        OnDeserializeWriteHandle(Pointee(Eq(mSerializeCreateInfo)),
-                                                 sizeof(mSerializeCreateInfo), _))
-                .WillOnce(WithArg<2>([=](server::MemoryTransferService::WriteHandle** writeHandle) {
-                    *writeHandle = handle;
-                    return true;
-                }));
-
-            return handle;
-        }
-
-        void MockServerWriteHandleDeserializeFailure() {
-            EXPECT_CALL(serverMemoryTransferService,
-                        OnDeserializeWriteHandle(Pointee(Eq(mSerializeCreateInfo)),
-                                                 sizeof(mSerializeCreateInfo), _))
-                .WillOnce(Return(false));
-        }
-
-        void ExpectClientWriteHandleSerializeDataUpdate(ClientWriteHandle* handle) {
-            EXPECT_CALL(clientMemoryTransferService,
-                        OnWriteHandleSizeOfSerializeDataUpdate(handle, _, _))
-                .WillOnce(
-                    InvokeWithoutArgs([&]() { return sizeof(mWriteHandleSerializeDataInfo); }));
-            EXPECT_CALL(clientMemoryTransferService,
-                        OnWriteHandleSerializeDataUpdate(handle, _, _, _))
-                .WillOnce(WithArg<1>([&](void* serializePointer) {
-                    memcpy(serializePointer, &mWriteHandleSerializeDataInfo,
-                           sizeof(mWriteHandleSerializeDataInfo));
-                    return sizeof(mWriteHandleSerializeDataInfo);
-                }));
-        }
-
-        void ExpectServerWriteHandleDeserializeDataUpdate(ServerWriteHandle* handle,
-                                                          uint32_t expectedData) {
-            EXPECT_CALL(serverMemoryTransferService,
-                        OnWriteHandleDeserializeDataUpdate(
-                            handle, Pointee(Eq(mWriteHandleSerializeDataInfo)),
-                            sizeof(mWriteHandleSerializeDataInfo), _, _))
-                .WillOnce(Return(true));
-        }
-
-        void MockServerWriteHandleDeserializeDataUpdateFailure(ServerWriteHandle* handle) {
-            EXPECT_CALL(serverMemoryTransferService,
-                        OnWriteHandleDeserializeDataUpdate(
-                            handle, Pointee(Eq(mWriteHandleSerializeDataInfo)),
-                            sizeof(mWriteHandleSerializeDataInfo), _, _))
-                .WillOnce(Return(false));
-        }
-
-        // Arbitrary values used within tests to check if serialized data is correctly passed
-        // between the client and server. The static data changes between runs of the tests and
-        // test expectations will check that serialized values are passed to the respective
-        // deserialization function.
-        static uint32_t mSerializeCreateInfo;
-        static uint32_t mReadHandleSerializeDataInfo;
-        static uint32_t mWriteHandleSerializeDataInfo;
-
-        // Represents the buffer contents for the test.
-        static uint32_t mBufferContent;
-
-        static constexpr size_t kBufferSize = sizeof(mBufferContent);
-
-        // The client's zero-initialized buffer for writing.
-        uint32_t mMappedBufferContent = 0;
-
-        // |mMappedBufferContent| should be set equal to |mUpdatedBufferContent| when the client
-        // performs a write. Test expectations should check that |mBufferContent ==
-        // mUpdatedBufferContent| after all writes are flushed.
-        static uint32_t mUpdatedBufferContent;
-
-        StrictMock<dawn::wire::server::MockMemoryTransferService> serverMemoryTransferService;
-        StrictMock<dawn::wire::client::MockMemoryTransferService> clientMemoryTransferService;
-    };
-
-    uint32_t WireMemoryTransferServiceTests::mBufferContent = 1337;
-    uint32_t WireMemoryTransferServiceTests::mUpdatedBufferContent = 2349;
-    uint32_t WireMemoryTransferServiceTests::mSerializeCreateInfo = 4242;
-    uint32_t WireMemoryTransferServiceTests::mReadHandleSerializeDataInfo = 1394;
-    uint32_t WireMemoryTransferServiceTests::mWriteHandleSerializeDataInfo = 1235;
-
-    // Test successful mapping for reading.
-    TEST_F(WireMemoryTransferServiceTests, BufferMapReadSuccess) {
-        WGPUBuffer buffer;
-        WGPUBuffer apiBuffer;
-
-        // The client should create and serialize a ReadHandle on creation.
-        ClientReadHandle* clientHandle = ExpectReadHandleCreation();
-        ExpectReadHandleSerialization(clientHandle);
-
-        std::tie(apiBuffer, buffer) = CreateBuffer(WGPUBufferUsage_MapRead);
-
-        // The server should deserialize the read handle from the client and then serialize
-        // an initialization message.
-        ServerReadHandle* serverHandle = ExpectServerReadHandleDeserialize();
-
-        FlushClient();
-
-        wgpuBufferMapAsync(buffer, WGPUMapMode_Read, 0, kBufferSize, ToMockBufferMapCallback,
-                           nullptr);
-
-        // The handle serialize data update on mapAsync cmd
-        ExpectServerReadHandleSerializeDataUpdate(serverHandle);
-
-        // Mock a successful callback
-        EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Read, 0, kBufferSize, _, _))
-            .WillOnce(InvokeWithoutArgs([&]() {
-                api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Success);
-            }));
-        EXPECT_CALL(clientMemoryTransferService, OnReadHandleGetData(clientHandle))
-            .WillOnce(Return(&mBufferContent));
-        EXPECT_CALL(api, BufferGetConstMappedRange(apiBuffer, 0, kBufferSize))
-            .WillOnce(Return(&mBufferContent));
-
-        FlushClient();
-
-        // The client receives a successful callback.
-        EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_Success, _)).Times(1);
-
-        // The client should receive the handle data update message from the server.
-        ExpectClientReadHandleDeserializeDataUpdate(clientHandle, &mBufferContent);
-
-        FlushServer();
-
-        wgpuBufferUnmap(buffer);
-        EXPECT_CALL(api, BufferUnmap(apiBuffer)).Times(1);
-
-        FlushClient();
-
-        // The handle is destroyed once the buffer is destroyed.
-        EXPECT_CALL(clientMemoryTransferService, OnReadHandleDestroy(clientHandle)).Times(1);
-        EXPECT_CALL(serverMemoryTransferService, OnReadHandleDestroy(serverHandle)).Times(1);
+    client::MemoryTransferService* GetClientMemoryTransferService() override {
+        return &clientMemoryTransferService;
     }
 
-    // Test ReadHandle destroy behavior
-    TEST_F(WireMemoryTransferServiceTests, BufferMapReadDestroy) {
-        WGPUBuffer buffer;
-        WGPUBuffer apiBuffer;
-
-        // The client should create and serialize a ReadHandle on creation.
-        ClientReadHandle* clientHandle = ExpectReadHandleCreation();
-        ExpectReadHandleSerialization(clientHandle);
-
-        std::tie(apiBuffer, buffer) = CreateBuffer(WGPUBufferUsage_MapRead);
-
-        // The server should deserialize the read handle from the client and then serialize
-        // an initialization message.
-        ServerReadHandle* serverHandle = ExpectServerReadHandleDeserialize();
-
-        FlushClient();
-
-        // The handle is destroyed once the buffer is destroyed.
-        EXPECT_CALL(clientMemoryTransferService, OnReadHandleDestroy(clientHandle)).Times(1);
-        wgpuBufferDestroy(buffer);
-        EXPECT_CALL(serverMemoryTransferService, OnReadHandleDestroy(serverHandle)).Times(1);
-        EXPECT_CALL(api, BufferDestroy(apiBuffer)).Times(1);
-
-        FlushClient();
+    server::MemoryTransferService* GetServerMemoryTransferService() override {
+        return &serverMemoryTransferService;
     }
 
-    // Test unsuccessful mapping for reading.
-    TEST_F(WireMemoryTransferServiceTests, BufferMapReadError) {
-        WGPUBuffer buffer;
-        WGPUBuffer apiBuffer;
+    void SetUp() override {
+        WireTest::SetUp();
 
-        // The client should create and serialize a ReadHandle on creation.
-        ClientReadHandle* clientHandle = ExpectReadHandleCreation();
-        ExpectReadHandleSerialization(clientHandle);
+        mockBufferMapCallback = std::make_unique<StrictMock<MockBufferMapCallback>>();
 
-        // The server should deserialize the ReadHandle from the client.
-        ServerReadHandle* serverHandle = ExpectServerReadHandleDeserialize();
-
-        std::tie(apiBuffer, buffer) = CreateBuffer(WGPUBufferUsage_MapRead);
-        FlushClient();
-
-        wgpuBufferMapAsync(buffer, WGPUMapMode_Read, 0, kBufferSize, ToMockBufferMapCallback,
-                           nullptr);
-
-        // Mock a failed callback.
-        EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Read, 0, kBufferSize, _, _))
-            .WillOnce(InvokeWithoutArgs([&]() {
-                api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Error);
-            }));
-
-        FlushClient();
-
-        // The client receives an error callback.
-        EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_Error, _)).Times(1);
-
-        FlushServer();
-
-        wgpuBufferUnmap(buffer);
-
-        EXPECT_CALL(api, BufferUnmap(apiBuffer)).Times(1);
-
-        FlushClient();
-
-        // The handle is destroyed once the buffer is destroyed.
-        EXPECT_CALL(clientMemoryTransferService, OnReadHandleDestroy(clientHandle)).Times(1);
-        EXPECT_CALL(serverMemoryTransferService, OnReadHandleDestroy(serverHandle)).Times(1);
+        // TODO(enga): Make this thread-safe.
+        mBufferContent++;
+        mMappedBufferContent = 0;
+        mUpdatedBufferContent++;
+        mSerializeCreateInfo++;
+        mReadHandleSerializeDataInfo++;
+        mWriteHandleSerializeDataInfo++;
     }
 
-    // Test ReadHandle creation failure.
-    TEST_F(WireMemoryTransferServiceTests, BufferMapReadHandleCreationFailure) {
-        // Mock a ReadHandle creation failure
-        MockReadHandleCreationFailure();
+    void TearDown() override {
+        WireTest::TearDown();
 
+        // Delete mock so that expectations are checked
+        mockBufferMapCallback = nullptr;
+    }
+
+    void FlushClient(bool success = true) {
+        WireTest::FlushClient(success);
+        Mock::VerifyAndClearExpectations(&serverMemoryTransferService);
+    }
+
+    void FlushServer(bool success = true) {
+        WireTest::FlushServer(success);
+
+        Mock::VerifyAndClearExpectations(&mockBufferMapCallback);
+        Mock::VerifyAndClearExpectations(&clientMemoryTransferService);
+    }
+
+  protected:
+    using ClientReadHandle = client::MockMemoryTransferService::MockReadHandle;
+    using ServerReadHandle = server::MockMemoryTransferService::MockReadHandle;
+    using ClientWriteHandle = client::MockMemoryTransferService::MockWriteHandle;
+    using ServerWriteHandle = server::MockMemoryTransferService::MockWriteHandle;
+
+    std::pair<WGPUBuffer, WGPUBuffer> CreateBuffer(WGPUBufferUsage usage = WGPUBufferUsage_None) {
         WGPUBufferDescriptor descriptor = {};
         descriptor.size = kBufferSize;
-        descriptor.usage = WGPUBufferUsage_MapRead;
+        descriptor.usage = usage;
 
-        wgpuDeviceCreateBuffer(device, &descriptor);
-    }
-
-    // Test MapRead DeserializeReadHandle failure.
-    TEST_F(WireMemoryTransferServiceTests, BufferMapReadDeserializeReadHandleFailure) {
-        WGPUBuffer buffer;
-        WGPUBuffer apiBuffer;
-
-        // The client should create and serialize a ReadHandle on mapping for reading..
-        ClientReadHandle* clientHandle = ExpectReadHandleCreation();
-        ExpectReadHandleSerialization(clientHandle);
-
-        std::tie(apiBuffer, buffer) = CreateBuffer(WGPUBufferUsage_MapRead);
-
-        // Mock a Deserialization failure.
-        MockServerReadHandleDeserializeFailure();
-
-        FlushClient(false);
-
-        // The handle is destroyed once the buffer is destroyed.
-        EXPECT_CALL(clientMemoryTransferService, OnReadHandleDestroy(clientHandle)).Times(1);
-    }
-
-    // Test read handle DeserializeDataUpdate failure.
-    TEST_F(WireMemoryTransferServiceTests, BufferMapReadDeserializeDataUpdateFailure) {
-        WGPUBuffer buffer;
-        WGPUBuffer apiBuffer;
-
-        // The client should create and serialize a ReadHandle on mapping for reading.
-        ClientReadHandle* clientHandle = ExpectReadHandleCreation();
-        ExpectReadHandleSerialization(clientHandle);
-
-        // The server should deserialize the read handle from the client and then serialize
-        // an initialization message.
-        ServerReadHandle* serverHandle = ExpectServerReadHandleDeserialize();
-
-        std::tie(apiBuffer, buffer) = CreateBuffer(WGPUBufferUsage_MapRead);
-        FlushClient();
-
-        wgpuBufferMapAsync(buffer, WGPUMapMode_Read, 0, kBufferSize, ToMockBufferMapCallback,
-                           nullptr);
-
-        // The handle serialize data update on mapAsync cmd
-        ExpectServerReadHandleSerializeDataUpdate(serverHandle);
-
-        // Mock a successful callback
-        EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Read, 0, kBufferSize, _, _))
-            .WillOnce(InvokeWithoutArgs([&]() {
-                api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Success);
-            }));
-        EXPECT_CALL(api, BufferGetConstMappedRange(apiBuffer, 0, kBufferSize))
-            .WillOnce(Return(&mBufferContent));
-
-        FlushClient();
-
-        // The client should receive the handle data update message from the server.
-        // Mock a deserialization failure.
-        MockClientReadHandleDeserializeDataUpdateFailure(clientHandle);
-
-        // Failed deserialization is a fatal failure and the client synchronously receives a
-        // DEVICE_LOST callback.
-        EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_DeviceLost, _)).Times(1);
-
-        FlushServer(false);
-
-        // The handle is destroyed once the buffer is destroyed.
-        EXPECT_CALL(clientMemoryTransferService, OnReadHandleDestroy(clientHandle)).Times(1);
-        EXPECT_CALL(serverMemoryTransferService, OnReadHandleDestroy(serverHandle)).Times(1);
-    }
-
-    // Test mapping for reading destroying the buffer before unmapping on the client side.
-    TEST_F(WireMemoryTransferServiceTests, BufferMapReadDestroyBeforeUnmap) {
-        WGPUBuffer buffer;
-        WGPUBuffer apiBuffer;
-
-        // The client should create and serialize a ReadHandle on mapping for reading..
-        ClientReadHandle* clientHandle = ExpectReadHandleCreation();
-        ExpectReadHandleSerialization(clientHandle);
-
-        // The server should deserialize the read handle from the client and then serialize
-        // an initialization message.
-        ServerReadHandle* serverHandle = ExpectServerReadHandleDeserialize();
-
-        std::tie(apiBuffer, buffer) = CreateBuffer(WGPUBufferUsage_MapRead);
-        FlushClient();
-
-        wgpuBufferMapAsync(buffer, WGPUMapMode_Read, 0, kBufferSize, ToMockBufferMapCallback,
-                           nullptr);
-
-        // The handle serialize data update on mapAsync cmd
-        ExpectServerReadHandleSerializeDataUpdate(serverHandle);
-
-        // Mock a successful callback
-        EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Read, 0, kBufferSize, _, _))
-            .WillOnce(InvokeWithoutArgs([&]() {
-                api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Success);
-            }));
-        EXPECT_CALL(clientMemoryTransferService, OnReadHandleGetData(clientHandle))
-            .WillOnce(Return(&mBufferContent));
-        EXPECT_CALL(api, BufferGetConstMappedRange(apiBuffer, 0, kBufferSize))
-            .WillOnce(Return(&mBufferContent));
-
-        FlushClient();
-
-        // The client receives a successful callback.
-        EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_Success, _)).Times(1);
-
-        // The client should receive the handle data update message from the server.
-        ExpectClientReadHandleDeserializeDataUpdate(clientHandle, &mBufferContent);
-
-        FlushServer();
-
-        // THIS IS THE TEST: destroy the buffer before unmapping and check it destroyed the mapping
-        // immediately, both in the client and server side.
-        {
-            EXPECT_CALL(clientMemoryTransferService, OnReadHandleDestroy(clientHandle)).Times(1);
-            wgpuBufferDestroy(buffer);
-
-            EXPECT_CALL(serverMemoryTransferService, OnReadHandleDestroy(serverHandle)).Times(1);
-            EXPECT_CALL(api, BufferDestroy(apiBuffer)).Times(1);
-            FlushClient();
-
-            // The handle is already destroyed so unmap only results in a server unmap call.
-            wgpuBufferUnmap(buffer);
-
-            EXPECT_CALL(api, BufferUnmap(apiBuffer)).Times(1);
-            FlushClient();
-        }
-    }
-
-    // Test successful mapping for writing.
-    TEST_F(WireMemoryTransferServiceTests, BufferMapWriteSuccess) {
-        WGPUBuffer buffer;
-        WGPUBuffer apiBuffer;
-
-        ClientWriteHandle* clientHandle = ExpectWriteHandleCreation(false);
-        ExpectWriteHandleSerialization(clientHandle);
-
-        std::tie(apiBuffer, buffer) = CreateBuffer(WGPUBufferUsage_MapWrite);
-
-        // The server should then deserialize the WriteHandle from the client.
-        ServerWriteHandle* serverHandle = ExpectServerWriteHandleDeserialization();
-
-        FlushClient();
-
-        wgpuBufferMapAsync(buffer, WGPUMapMode_Write, 0, kBufferSize, ToMockBufferMapCallback,
-                           nullptr);
-
-        // Mock a successful callback.
-        EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Write, 0, kBufferSize, _, _))
-            .WillOnce(InvokeWithoutArgs([&]() {
-                api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Success);
-            }));
-        EXPECT_CALL(clientMemoryTransferService, OnWriteHandleGetData(clientHandle))
-            .WillOnce(Return(&mBufferContent));
-        EXPECT_CALL(api, BufferGetMappedRange(apiBuffer, 0, kBufferSize))
-            .WillOnce(Return(&mMappedBufferContent));
-
-        FlushClient();
-
-        // The client receives a successful callback.
-        EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_Success, _)).Times(1);
-
-        FlushServer();
-
-        // The client writes to the handle contents.
-        mMappedBufferContent = mUpdatedBufferContent;
-
-        // The client will then serialize data update and destroy the handle on Unmap()
-        ExpectClientWriteHandleSerializeDataUpdate(clientHandle);
-
-        wgpuBufferUnmap(buffer);
-
-        // The server deserializes the data update message.
-        ExpectServerWriteHandleDeserializeDataUpdate(serverHandle, mUpdatedBufferContent);
-
-        EXPECT_CALL(api, BufferUnmap(apiBuffer)).Times(1);
-
-        FlushClient();
-
-        // The handle is destroyed once the buffer is destroyed.
-        EXPECT_CALL(clientMemoryTransferService, OnWriteHandleDestroy(clientHandle)).Times(1);
-        EXPECT_CALL(serverMemoryTransferService, OnWriteHandleDestroy(serverHandle)).Times(1);
-    }
-
-    // Test WriteHandle destroy behavior
-    TEST_F(WireMemoryTransferServiceTests, BufferMapWriteDestroy) {
-        WGPUBuffer buffer;
-        WGPUBuffer apiBuffer;
-
-        ClientWriteHandle* clientHandle = ExpectWriteHandleCreation(false);
-        ExpectWriteHandleSerialization(clientHandle);
-
-        std::tie(apiBuffer, buffer) = CreateBuffer(WGPUBufferUsage_MapWrite);
-
-        // The server should then deserialize the WriteHandle from the client.
-        ServerWriteHandle* serverHandle = ExpectServerWriteHandleDeserialization();
-
-        FlushClient();
-
-        // The handle is destroyed once the buffer is destroyed.
-        EXPECT_CALL(clientMemoryTransferService, OnWriteHandleDestroy(clientHandle)).Times(1);
-        wgpuBufferDestroy(buffer);
-        EXPECT_CALL(serverMemoryTransferService, OnWriteHandleDestroy(serverHandle)).Times(1);
-        EXPECT_CALL(api, BufferDestroy(apiBuffer)).Times(1);
-
-        FlushClient();
-    }
-
-    // Test unsuccessful MapWrite.
-    TEST_F(WireMemoryTransferServiceTests, BufferMapWriteError) {
-        WGPUBuffer buffer;
-        WGPUBuffer apiBuffer;
-
-        // The client should create and serialize a WriteHandle on buffer creation with MapWrite
-        // usage.
-        ClientWriteHandle* clientHandle = ExpectWriteHandleCreation(false);
-        ExpectWriteHandleSerialization(clientHandle);
-
-        std::tie(apiBuffer, buffer) = CreateBuffer(WGPUBufferUsage_MapWrite);
-
-        // The server should then deserialize the WriteHandle from the client.
-        ServerWriteHandle* serverHandle = ExpectServerWriteHandleDeserialization();
-
-        FlushClient();
-
-        wgpuBufferMapAsync(buffer, WGPUMapMode_Write, 0, kBufferSize, ToMockBufferMapCallback,
-                           nullptr);
-
-        // Mock an error callback.
-        EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Write, 0, kBufferSize, _, _))
-            .WillOnce(InvokeWithoutArgs([&]() {
-                api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Error);
-            }));
-
-        FlushClient();
-
-        // The client receives an error callback.
-        EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_Error, _)).Times(1);
-
-        FlushServer();
-
-        wgpuBufferUnmap(buffer);
-
-        EXPECT_CALL(api, BufferUnmap(apiBuffer)).Times(1);
-
-        FlushClient();
-
-        // The handle is destroyed once the buffer is destroyed.
-        EXPECT_CALL(clientMemoryTransferService, OnWriteHandleDestroy(clientHandle)).Times(1);
-        EXPECT_CALL(serverMemoryTransferService, OnWriteHandleDestroy(serverHandle)).Times(1);
-    }
-
-    // Test WriteHandle creation failure.
-    TEST_F(WireMemoryTransferServiceTests, BufferMapWriteHandleCreationFailure) {
-        // Mock a WriteHandle creation failure
-        MockWriteHandleCreationFailure();
-
-        WGPUBufferDescriptor descriptor = {};
-        descriptor.size = kBufferSize;
-        descriptor.usage = WGPUBufferUsage_MapWrite;
-
-        wgpuDeviceCreateBuffer(device, &descriptor);
-    }
-
-    // Test MapWrite DeserializeWriteHandle failure.
-    TEST_F(WireMemoryTransferServiceTests, BufferMapWriteDeserializeWriteHandleFailure) {
-        WGPUBuffer buffer;
-        WGPUBuffer apiBuffer;
-
-        // The client should create and serialize a WriteHandle on buffer creation with MapWrite
-        // usage.
-        ClientWriteHandle* clientHandle = ExpectWriteHandleCreation(false);
-        ExpectWriteHandleSerialization(clientHandle);
-
-        std::tie(apiBuffer, buffer) = CreateBuffer(WGPUBufferUsage_MapWrite);
-
-        // Mock a deserialization failure.
-        MockServerWriteHandleDeserializeFailure();
-
-        FlushClient(false);
-
-        // The handle is destroyed once the buffer is destroyed.
-        EXPECT_CALL(clientMemoryTransferService, OnWriteHandleDestroy(clientHandle)).Times(1);
-    }
-
-    // Test MapWrite DeserializeDataUpdate failure.
-    TEST_F(WireMemoryTransferServiceTests, BufferMapWriteDeserializeDataUpdateFailure) {
-        WGPUBuffer buffer;
-        WGPUBuffer apiBuffer;
-
-        ClientWriteHandle* clientHandle = ExpectWriteHandleCreation(false);
-        ExpectWriteHandleSerialization(clientHandle);
-
-        std::tie(apiBuffer, buffer) = CreateBuffer(WGPUBufferUsage_MapWrite);
-
-        // The server should then deserialize the WriteHandle from the client.
-        ServerWriteHandle* serverHandle = ExpectServerWriteHandleDeserialization();
-
-        FlushClient();
-
-        wgpuBufferMapAsync(buffer, WGPUMapMode_Write, 0, kBufferSize, ToMockBufferMapCallback,
-                           nullptr);
-
-        // Mock a successful callback.
-        EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Write, 0, kBufferSize, _, _))
-            .WillOnce(InvokeWithoutArgs([&]() {
-                api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Success);
-            }));
-        EXPECT_CALL(clientMemoryTransferService, OnWriteHandleGetData(clientHandle))
-            .WillOnce(Return(&mBufferContent));
-        EXPECT_CALL(api, BufferGetMappedRange(apiBuffer, 0, kBufferSize))
-            .WillOnce(Return(&mMappedBufferContent));
-
-        FlushClient();
-
-        // The client receives a success callback.
-        EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_Success, _)).Times(1);
-
-        FlushServer();
-
-        // The client writes to the handle contents.
-        mMappedBufferContent = mUpdatedBufferContent;
-
-        // The client will then serialize data update
-        ExpectClientWriteHandleSerializeDataUpdate(clientHandle);
-
-        wgpuBufferUnmap(buffer);
-
-        // The server deserializes the data update message. Mock a deserialization failure.
-        MockServerWriteHandleDeserializeDataUpdateFailure(serverHandle);
-
-        FlushClient(false);
-
-        // The handle is destroyed once the buffer is destroyed.
-        EXPECT_CALL(clientMemoryTransferService, OnWriteHandleDestroy(clientHandle)).Times(1);
-        EXPECT_CALL(serverMemoryTransferService, OnWriteHandleDestroy(serverHandle)).Times(1);
-    }
-
-    // Test MapWrite destroying the buffer before unmapping on the client side.
-    TEST_F(WireMemoryTransferServiceTests, BufferMapWriteDestroyBeforeUnmap) {
-        WGPUBuffer buffer;
-        WGPUBuffer apiBuffer;
-
-        ClientWriteHandle* clientHandle = ExpectWriteHandleCreation(false);
-        ExpectWriteHandleSerialization(clientHandle);
-
-        std::tie(apiBuffer, buffer) = CreateBuffer(WGPUBufferUsage_MapWrite);
-
-        // The server should then deserialize the WriteHandle from the client.
-        ServerWriteHandle* serverHandle = ExpectServerWriteHandleDeserialization();
-
-        FlushClient();
-
-        wgpuBufferMapAsync(buffer, WGPUMapMode_Write, 0, kBufferSize, ToMockBufferMapCallback,
-                           nullptr);
-
-        // Mock a successful callback.
-        EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Write, 0, kBufferSize, _, _))
-            .WillOnce(InvokeWithoutArgs([&]() {
-                api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Success);
-            }));
-        EXPECT_CALL(clientMemoryTransferService, OnWriteHandleGetData(clientHandle))
-            .WillOnce(Return(&mBufferContent));
-        EXPECT_CALL(api, BufferGetMappedRange(apiBuffer, 0, kBufferSize))
-            .WillOnce(Return(&mMappedBufferContent));
-
-        FlushClient();
-
-        // The client receives a successful callback.
-        EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_Success, _)).Times(1);
-
-        FlushServer();
-
-        // The client writes to the handle contents.
-        mMappedBufferContent = mUpdatedBufferContent;
-
-        // THIS IS THE TEST: destroy the buffer before unmapping and check it destroyed the mapping
-        // immediately, both in the client and server side.
-        {
-            // The handle is destroyed once the buffer is destroyed.
-            EXPECT_CALL(clientMemoryTransferService, OnWriteHandleDestroy(clientHandle)).Times(1);
-
-            wgpuBufferDestroy(buffer);
-
-            EXPECT_CALL(serverMemoryTransferService, OnWriteHandleDestroy(serverHandle)).Times(1);
-            EXPECT_CALL(api, BufferDestroy(apiBuffer)).Times(1);
-            FlushClient();
-
-            // The handle is already destroyed so unmap only results in a server unmap call.
-            wgpuBufferUnmap(buffer);
-
-            EXPECT_CALL(api, BufferUnmap(apiBuffer)).Times(1);
-            FlushClient();
-        }
-    }
-
-    // Test successful buffer creation with mappedAtCreation = true.
-    TEST_F(WireMemoryTransferServiceTests, MappedAtCreationSuccess) {
-        // The client should create and serialize a WriteHandle on createBufferMapped.
-        ClientWriteHandle* clientHandle = ExpectWriteHandleCreation(true);
-        ExpectWriteHandleSerialization(clientHandle);
-
-        // The server should then deserialize the WriteHandle from the client.
-        ServerWriteHandle* serverHandle = ExpectServerWriteHandleDeserialization();
-
-        WGPUBuffer buffer;
-        WGPUBuffer apiBuffer;
-        std::tie(apiBuffer, buffer) = CreateBufferMapped();
-        FlushClient();
-
-        // Update the mapped contents.
-        mMappedBufferContent = mUpdatedBufferContent;
-
-        // When the client Unmaps the buffer, it will serialize data update writes to the handle and
-        // destroy it.
-        ExpectClientWriteHandleSerializeDataUpdate(clientHandle);
-        EXPECT_CALL(clientMemoryTransferService, OnWriteHandleDestroy(clientHandle)).Times(1);
-
-        wgpuBufferUnmap(buffer);
-
-        // The server deserializes the data update message.
-        ExpectServerWriteHandleDeserializeDataUpdate(serverHandle, mUpdatedBufferContent);
-
-        // After the handle is updated it can be destroyed.
-        EXPECT_CALL(serverMemoryTransferService, OnWriteHandleDestroy(serverHandle)).Times(1);
-        EXPECT_CALL(api, BufferUnmap(apiBuffer)).Times(1);
-
-        FlushClient();
-    }
-
-    // Test buffer creation with mappedAtCreation WriteHandle creation failure.
-    TEST_F(WireMemoryTransferServiceTests, MappedAtCreationWriteHandleCreationFailure) {
-        // Mock a WriteHandle creation failure
-        MockWriteHandleCreationFailure();
-
-        WGPUBufferDescriptor descriptor = {};
-        descriptor.size = sizeof(mBufferContent);
-        descriptor.mappedAtCreation = true;
-
+        WGPUBuffer apiBuffer = api.GetNewBuffer();
         WGPUBuffer buffer = wgpuDeviceCreateBuffer(device, &descriptor);
-        EXPECT_EQ(nullptr, wgpuBufferGetMappedRange(buffer, 0, sizeof(mBufferContent)));
+
+        EXPECT_CALL(api, DeviceCreateBuffer(apiDevice, _))
+            .WillOnce(Return(apiBuffer))
+            .RetiresOnSaturation();
+
+        return std::make_pair(apiBuffer, buffer);
     }
 
-    // Test buffer creation with mappedAtCreation DeserializeWriteHandle failure.
-    TEST_F(WireMemoryTransferServiceTests, MappedAtCreationDeserializeWriteHandleFailure) {
-        // The client should create and serialize a WriteHandle on createBufferMapped.
-        ClientWriteHandle* clientHandle = ExpectWriteHandleCreation(true);
-        ExpectWriteHandleSerialization(clientHandle);
-
-        // The server should then deserialize the WriteHandle from the client.
-        MockServerWriteHandleDeserializeFailure();
-
+    std::pair<WGPUBuffer, WGPUBuffer> CreateBufferMapped(
+        WGPUBufferUsage usage = WGPUBufferUsage_None) {
         WGPUBufferDescriptor descriptor = {};
         descriptor.size = sizeof(mBufferContent);
         descriptor.mappedAtCreation = true;
+        descriptor.usage = usage;
 
         WGPUBuffer apiBuffer = api.GetNewBuffer();
 
-        wgpuDeviceCreateBuffer(device, &descriptor);
+        WGPUBuffer buffer = wgpuDeviceCreateBuffer(device, &descriptor);
 
         EXPECT_CALL(api, DeviceCreateBuffer(apiDevice, _)).WillOnce(Return(apiBuffer));
-        // Now bufferGetMappedRange won't be called if deserialize writeHandle fails
+        EXPECT_CALL(api, BufferGetMappedRange(apiBuffer, 0, sizeof(mBufferContent)))
+            .WillOnce(Return(&mMappedBufferContent));
 
-        FlushClient(false);
-
-        EXPECT_CALL(clientMemoryTransferService, OnWriteHandleDestroy(clientHandle)).Times(1);
+        return std::make_pair(apiBuffer, buffer);
     }
 
-    // Test buffer creation with mappedAtCreation = true DeserializeDataUpdate failure.
-    TEST_F(WireMemoryTransferServiceTests, MappedAtCreationDeserializeDataUpdateFailure) {
-        // The client should create and serialize a WriteHandle on createBufferMapped.
-        ClientWriteHandle* clientHandle = ExpectWriteHandleCreation(true);
-        ExpectWriteHandleSerialization(clientHandle);
+    ClientReadHandle* ExpectReadHandleCreation() {
+        // Create the handle first so we can use it in later expectations.
+        ClientReadHandle* handle = clientMemoryTransferService.NewReadHandle();
 
-        // The server should then deserialize the WriteHandle from the client.
-        ServerWriteHandle* serverHandle = ExpectServerWriteHandleDeserialization();
+        EXPECT_CALL(clientMemoryTransferService, OnCreateReadHandle(sizeof(mBufferContent)))
+            .WillOnce(InvokeWithoutArgs([=]() { return handle; }));
 
-        WGPUBuffer buffer;
-        WGPUBuffer apiBuffer;
-        std::tie(apiBuffer, buffer) = CreateBufferMapped();
-        FlushClient();
-
-        // Update the mapped contents.
-        mMappedBufferContent = mUpdatedBufferContent;
-
-        // When the client Unmaps the buffer, it will serialize data update writes to the handle and
-        // destroy it.
-        ExpectClientWriteHandleSerializeDataUpdate(clientHandle);
-        EXPECT_CALL(clientMemoryTransferService, OnWriteHandleDestroy(clientHandle)).Times(1);
-
-        wgpuBufferUnmap(buffer);
-
-        // The server deserializes the data update message. Mock a deserialization failure.
-        MockServerWriteHandleDeserializeDataUpdateFailure(serverHandle);
-
-        FlushClient(false);
-
-        // Failed BufferUpdateMappedData cmd will early return so BufferUnmap is not processed.
-        // The server side writeHandle is destructed at buffer destruction.
-        EXPECT_CALL(serverMemoryTransferService, OnWriteHandleDestroy(serverHandle)).Times(1);
+        return handle;
     }
 
-    // Test mappedAtCreation=true destroying the buffer before unmapping on the client side.
-    TEST_F(WireMemoryTransferServiceTests, MappedAtCreationDestroyBeforeUnmap) {
-        // The client should create and serialize a WriteHandle on createBufferMapped.
-        ClientWriteHandle* clientHandle = ExpectWriteHandleCreation(true);
-        ExpectWriteHandleSerialization(clientHandle);
+    void MockReadHandleCreationFailure() {
+        EXPECT_CALL(clientMemoryTransferService, OnCreateReadHandle(sizeof(mBufferContent)))
+            .WillOnce(InvokeWithoutArgs([=]() { return nullptr; }));
+    }
 
-        // The server should then deserialize the WriteHandle from the client.
-        ServerWriteHandle* serverHandle = ExpectServerWriteHandleDeserialization();
+    void ExpectReadHandleSerialization(ClientReadHandle* handle) {
+        EXPECT_CALL(clientMemoryTransferService, OnReadHandleSerializeCreateSize(handle))
+            .WillOnce(InvokeWithoutArgs([&]() { return sizeof(mSerializeCreateInfo); }));
+        EXPECT_CALL(clientMemoryTransferService, OnReadHandleSerializeCreate(handle, _))
+            .WillOnce(WithArg<1>([&](void* serializePointer) {
+                memcpy(serializePointer, &mSerializeCreateInfo, sizeof(mSerializeCreateInfo));
+                return sizeof(mSerializeCreateInfo);
+            }));
+    }
 
-        WGPUBuffer buffer;
-        WGPUBuffer apiBuffer;
-        std::tie(apiBuffer, buffer) = CreateBufferMapped();
-        FlushClient();
+    ServerReadHandle* ExpectServerReadHandleDeserialize() {
+        // Create the handle first so we can use it in later expectations.
+        ServerReadHandle* handle = serverMemoryTransferService.NewReadHandle();
 
-        // Update the mapped contents.
-        mMappedBufferContent = mUpdatedBufferContent;
+        EXPECT_CALL(serverMemoryTransferService,
+                    OnDeserializeReadHandle(Pointee(Eq(mSerializeCreateInfo)),
+                                            sizeof(mSerializeCreateInfo), _))
+            .WillOnce(WithArg<2>([=](server::MemoryTransferService::ReadHandle** readHandle) {
+                *readHandle = handle;
+                return true;
+            }));
 
-        // THIS IS THE TEST: destroy the buffer before unmapping and check it destroyed the mapping
-        // immediately, both in the client and server side.
-        {
-            EXPECT_CALL(clientMemoryTransferService, OnWriteHandleDestroy(clientHandle)).Times(1);
-            wgpuBufferDestroy(buffer);
+        return handle;
+    }
 
-            EXPECT_CALL(serverMemoryTransferService, OnWriteHandleDestroy(serverHandle)).Times(1);
-            EXPECT_CALL(api, BufferDestroy(apiBuffer)).Times(1);
-            FlushClient();
+    void MockServerReadHandleDeserializeFailure() {
+        EXPECT_CALL(serverMemoryTransferService,
+                    OnDeserializeReadHandle(Pointee(Eq(mSerializeCreateInfo)),
+                                            sizeof(mSerializeCreateInfo), _))
+            .WillOnce(InvokeWithoutArgs([&]() { return false; }));
+    }
 
-            // The handle is already destroyed so unmap only results in a server unmap call.
-            wgpuBufferUnmap(buffer);
+    void ExpectServerReadHandleSerializeDataUpdate(ServerReadHandle* handle) {
+        EXPECT_CALL(serverMemoryTransferService,
+                    OnReadHandleSizeOfSerializeDataUpdate(handle, _, _))
+            .WillOnce(InvokeWithoutArgs([&]() { return sizeof(mReadHandleSerializeDataInfo); }));
+        EXPECT_CALL(serverMemoryTransferService,
+                    OnReadHandleSerializeDataUpdate(handle, _, _, _, _))
+            .WillOnce(WithArg<4>([&](void* serializePointer) {
+                memcpy(serializePointer, &mReadHandleSerializeDataInfo,
+                       sizeof(mReadHandleSerializeDataInfo));
+                return sizeof(mReadHandleSerializeDataInfo);
+            }));
+    }
 
-            EXPECT_CALL(api, BufferUnmap(apiBuffer)).Times(1);
-            FlushClient();
+    void ExpectClientReadHandleDeserializeDataUpdate(ClientReadHandle* handle,
+                                                     uint32_t* mappedData) {
+        EXPECT_CALL(
+            clientMemoryTransferService,
+            OnReadHandleDeserializeDataUpdate(handle, Pointee(Eq(mReadHandleSerializeDataInfo)),
+                                              sizeof(mReadHandleSerializeDataInfo), _, _))
+            .WillOnce(Return(true));
+    }
+
+    void MockClientReadHandleDeserializeDataUpdateFailure(ClientReadHandle* handle) {
+        EXPECT_CALL(
+            clientMemoryTransferService,
+            OnReadHandleDeserializeDataUpdate(handle, Pointee(Eq(mReadHandleSerializeDataInfo)),
+                                              sizeof(mReadHandleSerializeDataInfo), _, _))
+            .WillOnce(Return(false));
+    }
+
+    ClientWriteHandle* ExpectWriteHandleCreation(bool mappedAtCreation) {
+        // Create the handle first so we can use it in later expectations.
+        ClientWriteHandle* handle = clientMemoryTransferService.NewWriteHandle();
+
+        EXPECT_CALL(clientMemoryTransferService, OnCreateWriteHandle(sizeof(mBufferContent)))
+            .WillOnce(InvokeWithoutArgs([=]() { return handle; }));
+        if (mappedAtCreation) {
+            EXPECT_CALL(clientMemoryTransferService, OnWriteHandleGetData(handle))
+                .WillOnce(Return(&mBufferContent));
         }
+
+        return handle;
     }
 
-    // Test a buffer with mappedAtCreation and MapRead usage destroy WriteHandle on unmap and switch
-    // data pointer to ReadHandle
-    TEST_F(WireMemoryTransferServiceTests, MappedAtCreationAndMapReadSuccess) {
-        // The client should create and serialize a ReadHandle and a WriteHandle on
-        // createBufferMapped.
-        ClientReadHandle* clientReadHandle = ExpectReadHandleCreation();
-        ExpectReadHandleSerialization(clientReadHandle);
-        ClientWriteHandle* clientWriteHandle = ExpectWriteHandleCreation(true);
-        ExpectWriteHandleSerialization(clientWriteHandle);
-
-        // The server should then deserialize a ReadHandle and a WriteHandle from the client.
-        ServerReadHandle* serverReadHandle = ExpectServerReadHandleDeserialize();
-        ServerWriteHandle* serverWriteHandle = ExpectServerWriteHandleDeserialization();
-
-        WGPUBuffer buffer;
-        WGPUBuffer apiBuffer;
-        std::tie(apiBuffer, buffer) = CreateBufferMapped(WGPUBufferUsage_MapRead);
-        FlushClient();
-
-        // Update the mapped contents.
-        mMappedBufferContent = mUpdatedBufferContent;
-
-        // When the client Unmaps the buffer, it will serialize data update writes to the handle and
-        // destroy it.
-        ExpectClientWriteHandleSerializeDataUpdate(clientWriteHandle);
-        EXPECT_CALL(clientMemoryTransferService, OnWriteHandleDestroy(clientWriteHandle)).Times(1);
-        EXPECT_CALL(clientMemoryTransferService, OnReadHandleGetData(clientReadHandle))
-            .WillOnce(Return(&mBufferContent));
-        wgpuBufferUnmap(buffer);
-
-        // The server deserializes the data update message.
-        ExpectServerWriteHandleDeserializeDataUpdate(serverWriteHandle, mUpdatedBufferContent);
-        EXPECT_CALL(api, BufferUnmap(apiBuffer)).Times(1);
-        EXPECT_CALL(serverMemoryTransferService, OnWriteHandleDestroy(serverWriteHandle)).Times(1);
-        FlushClient();
-
-        // The ReadHandle will be destoryed on buffer destroy.
-        EXPECT_CALL(clientMemoryTransferService, OnReadHandleDestroy(clientReadHandle)).Times(1);
-        EXPECT_CALL(serverMemoryTransferService, OnReadHandleDestroy(serverReadHandle)).Times(1);
+    void MockWriteHandleCreationFailure() {
+        EXPECT_CALL(clientMemoryTransferService, OnCreateWriteHandle(sizeof(mBufferContent)))
+            .WillOnce(InvokeWithoutArgs([=]() { return nullptr; }));
     }
 
-    // Test WriteHandle preserves after unmap for a buffer with mappedAtCreation and MapWrite usage
-    TEST_F(WireMemoryTransferServiceTests, MappedAtCreationAndMapWriteSuccess) {
-        // The client should create and serialize a WriteHandle on createBufferMapped.
-        ClientWriteHandle* clientHandle = ExpectWriteHandleCreation(true);
+    void ExpectWriteHandleSerialization(ClientWriteHandle* handle) {
+        EXPECT_CALL(clientMemoryTransferService, OnWriteHandleSerializeCreateSize(handle))
+            .WillOnce(InvokeWithoutArgs([&]() { return sizeof(mSerializeCreateInfo); }));
+        EXPECT_CALL(clientMemoryTransferService, OnWriteHandleSerializeCreate(handle, _))
+            .WillOnce(WithArg<1>([&](void* serializePointer) {
+                memcpy(serializePointer, &mSerializeCreateInfo, sizeof(mSerializeCreateInfo));
+                return sizeof(mSerializeCreateInfo);
+            }));
+    }
 
-        ExpectWriteHandleSerialization(clientHandle);
+    ServerWriteHandle* ExpectServerWriteHandleDeserialization() {
+        // Create the handle first so it can be used in later expectations.
+        ServerWriteHandle* handle = serverMemoryTransferService.NewWriteHandle();
 
-        // The server should then deserialize the WriteHandle from the client.
-        ServerWriteHandle* serverHandle = ExpectServerWriteHandleDeserialization();
+        EXPECT_CALL(serverMemoryTransferService,
+                    OnDeserializeWriteHandle(Pointee(Eq(mSerializeCreateInfo)),
+                                             sizeof(mSerializeCreateInfo), _))
+            .WillOnce(WithArg<2>([=](server::MemoryTransferService::WriteHandle** writeHandle) {
+                *writeHandle = handle;
+                return true;
+            }));
 
-        WGPUBuffer buffer;
-        WGPUBuffer apiBuffer;
-        std::tie(apiBuffer, buffer) = CreateBufferMapped(WGPUBufferUsage_MapWrite);
+        return handle;
+    }
+
+    void MockServerWriteHandleDeserializeFailure() {
+        EXPECT_CALL(serverMemoryTransferService,
+                    OnDeserializeWriteHandle(Pointee(Eq(mSerializeCreateInfo)),
+                                             sizeof(mSerializeCreateInfo), _))
+            .WillOnce(Return(false));
+    }
+
+    void ExpectClientWriteHandleSerializeDataUpdate(ClientWriteHandle* handle) {
+        EXPECT_CALL(clientMemoryTransferService,
+                    OnWriteHandleSizeOfSerializeDataUpdate(handle, _, _))
+            .WillOnce(InvokeWithoutArgs([&]() { return sizeof(mWriteHandleSerializeDataInfo); }));
+        EXPECT_CALL(clientMemoryTransferService, OnWriteHandleSerializeDataUpdate(handle, _, _, _))
+            .WillOnce(WithArg<1>([&](void* serializePointer) {
+                memcpy(serializePointer, &mWriteHandleSerializeDataInfo,
+                       sizeof(mWriteHandleSerializeDataInfo));
+                return sizeof(mWriteHandleSerializeDataInfo);
+            }));
+    }
+
+    void ExpectServerWriteHandleDeserializeDataUpdate(ServerWriteHandle* handle,
+                                                      uint32_t expectedData) {
+        EXPECT_CALL(
+            serverMemoryTransferService,
+            OnWriteHandleDeserializeDataUpdate(handle, Pointee(Eq(mWriteHandleSerializeDataInfo)),
+                                               sizeof(mWriteHandleSerializeDataInfo), _, _))
+            .WillOnce(Return(true));
+    }
+
+    void MockServerWriteHandleDeserializeDataUpdateFailure(ServerWriteHandle* handle) {
+        EXPECT_CALL(
+            serverMemoryTransferService,
+            OnWriteHandleDeserializeDataUpdate(handle, Pointee(Eq(mWriteHandleSerializeDataInfo)),
+                                               sizeof(mWriteHandleSerializeDataInfo), _, _))
+            .WillOnce(Return(false));
+    }
+
+    // Arbitrary values used within tests to check if serialized data is correctly passed
+    // between the client and server. The static data changes between runs of the tests and
+    // test expectations will check that serialized values are passed to the respective
+    // deserialization function.
+    static uint32_t mSerializeCreateInfo;
+    static uint32_t mReadHandleSerializeDataInfo;
+    static uint32_t mWriteHandleSerializeDataInfo;
+
+    // Represents the buffer contents for the test.
+    static uint32_t mBufferContent;
+
+    static constexpr size_t kBufferSize = sizeof(mBufferContent);
+
+    // The client's zero-initialized buffer for writing.
+    uint32_t mMappedBufferContent = 0;
+
+    // |mMappedBufferContent| should be set equal to |mUpdatedBufferContent| when the client
+    // performs a write. Test expectations should check that |mBufferContent ==
+    // mUpdatedBufferContent| after all writes are flushed.
+    static uint32_t mUpdatedBufferContent;
+
+    StrictMock<dawn::wire::server::MockMemoryTransferService> serverMemoryTransferService;
+    StrictMock<dawn::wire::client::MockMemoryTransferService> clientMemoryTransferService;
+};
+
+uint32_t WireMemoryTransferServiceTests::mBufferContent = 1337;
+uint32_t WireMemoryTransferServiceTests::mUpdatedBufferContent = 2349;
+uint32_t WireMemoryTransferServiceTests::mSerializeCreateInfo = 4242;
+uint32_t WireMemoryTransferServiceTests::mReadHandleSerializeDataInfo = 1394;
+uint32_t WireMemoryTransferServiceTests::mWriteHandleSerializeDataInfo = 1235;
+
+// Test successful mapping for reading.
+TEST_F(WireMemoryTransferServiceTests, BufferMapReadSuccess) {
+    WGPUBuffer buffer;
+    WGPUBuffer apiBuffer;
+
+    // The client should create and serialize a ReadHandle on creation.
+    ClientReadHandle* clientHandle = ExpectReadHandleCreation();
+    ExpectReadHandleSerialization(clientHandle);
+
+    std::tie(apiBuffer, buffer) = CreateBuffer(WGPUBufferUsage_MapRead);
+
+    // The server should deserialize the read handle from the client and then serialize
+    // an initialization message.
+    ServerReadHandle* serverHandle = ExpectServerReadHandleDeserialize();
+
+    FlushClient();
+
+    wgpuBufferMapAsync(buffer, WGPUMapMode_Read, 0, kBufferSize, ToMockBufferMapCallback, nullptr);
+
+    // The server handle serializes a data update on the mapAsync cmd.
+    ExpectServerReadHandleSerializeDataUpdate(serverHandle);
+
+    // Mock a successful callback
+    EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Read, 0, kBufferSize, _, _))
+        .WillOnce(InvokeWithoutArgs([&]() {
+            api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Success);
+        }));
+    EXPECT_CALL(clientMemoryTransferService, OnReadHandleGetData(clientHandle))
+        .WillOnce(Return(&mBufferContent));
+    EXPECT_CALL(api, BufferGetConstMappedRange(apiBuffer, 0, kBufferSize))
+        .WillOnce(Return(&mBufferContent));
+
+    FlushClient();
+
+    // The client receives a successful callback.
+    EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_Success, _)).Times(1);
+
+    // The client should receive the handle data update message from the server.
+    ExpectClientReadHandleDeserializeDataUpdate(clientHandle, &mBufferContent);
+
+    FlushServer();
+
+    wgpuBufferUnmap(buffer);
+    EXPECT_CALL(api, BufferUnmap(apiBuffer)).Times(1);
+
+    FlushClient();
+
+    // The handle is destroyed once the buffer is destroyed.
+    EXPECT_CALL(clientMemoryTransferService, OnReadHandleDestroy(clientHandle)).Times(1);
+    EXPECT_CALL(serverMemoryTransferService, OnReadHandleDestroy(serverHandle)).Times(1);
+}
+
+// Test ReadHandle destroy behavior
+TEST_F(WireMemoryTransferServiceTests, BufferMapReadDestroy) {
+    WGPUBuffer buffer;
+    WGPUBuffer apiBuffer;
+
+    // The client should create and serialize a ReadHandle on creation.
+    ClientReadHandle* clientHandle = ExpectReadHandleCreation();
+    ExpectReadHandleSerialization(clientHandle);
+
+    std::tie(apiBuffer, buffer) = CreateBuffer(WGPUBufferUsage_MapRead);
+
+    // The server should deserialize the read handle from the client and then serialize
+    // an initialization message.
+    ServerReadHandle* serverHandle = ExpectServerReadHandleDeserialize();
+
+    FlushClient();
+
+    // The handle is destroyed once the buffer is destroyed.
+    EXPECT_CALL(clientMemoryTransferService, OnReadHandleDestroy(clientHandle)).Times(1);
+    wgpuBufferDestroy(buffer);
+    EXPECT_CALL(serverMemoryTransferService, OnReadHandleDestroy(serverHandle)).Times(1);
+    EXPECT_CALL(api, BufferDestroy(apiBuffer)).Times(1);
+
+    FlushClient();
+}
+
+// Test unsuccessful mapping for reading.
+TEST_F(WireMemoryTransferServiceTests, BufferMapReadError) {
+    WGPUBuffer buffer;
+    WGPUBuffer apiBuffer;
+
+    // The client should create and serialize a ReadHandle on creation.
+    ClientReadHandle* clientHandle = ExpectReadHandleCreation();
+    ExpectReadHandleSerialization(clientHandle);
+
+    // The server should deserialize the ReadHandle from the client.
+    ServerReadHandle* serverHandle = ExpectServerReadHandleDeserialize();
+
+    std::tie(apiBuffer, buffer) = CreateBuffer(WGPUBufferUsage_MapRead);
+    FlushClient();
+
+    wgpuBufferMapAsync(buffer, WGPUMapMode_Read, 0, kBufferSize, ToMockBufferMapCallback, nullptr);
+
+    // Mock a failed callback.
+    EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Read, 0, kBufferSize, _, _))
+        .WillOnce(InvokeWithoutArgs(
+            [&]() { api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Error); }));
+
+    FlushClient();
+
+    // The client receives an error callback.
+    EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_Error, _)).Times(1);
+
+    FlushServer();
+
+    wgpuBufferUnmap(buffer);
+
+    EXPECT_CALL(api, BufferUnmap(apiBuffer)).Times(1);
+
+    FlushClient();
+
+    // The handle is destroyed once the buffer is destroyed.
+    EXPECT_CALL(clientMemoryTransferService, OnReadHandleDestroy(clientHandle)).Times(1);
+    EXPECT_CALL(serverMemoryTransferService, OnReadHandleDestroy(serverHandle)).Times(1);
+}
+
+// Test ReadHandle creation failure.
+TEST_F(WireMemoryTransferServiceTests, BufferMapReadHandleCreationFailure) {
+    // Mock a ReadHandle creation failure
+    MockReadHandleCreationFailure();
+
+    WGPUBufferDescriptor descriptor = {};
+    descriptor.size = kBufferSize;
+    descriptor.usage = WGPUBufferUsage_MapRead;
+
+    wgpuDeviceCreateBuffer(device, &descriptor);
+}
+
+// Test MapRead DeserializeReadHandle failure.
+TEST_F(WireMemoryTransferServiceTests, BufferMapReadDeserializeReadHandleFailure) {
+    WGPUBuffer buffer;
+    WGPUBuffer apiBuffer;
+
+    // The client should create and serialize a ReadHandle on buffer creation.
+    ClientReadHandle* clientHandle = ExpectReadHandleCreation();
+    ExpectReadHandleSerialization(clientHandle);
+
+    std::tie(apiBuffer, buffer) = CreateBuffer(WGPUBufferUsage_MapRead);
+
+    // Mock a Deserialization failure.
+    MockServerReadHandleDeserializeFailure();
+
+    FlushClient(false);
+
+    // The handle is destroyed once the buffer is destroyed.
+    EXPECT_CALL(clientMemoryTransferService, OnReadHandleDestroy(clientHandle)).Times(1);
+}
+
+// Test read handle DeserializeDataUpdate failure.
+TEST_F(WireMemoryTransferServiceTests, BufferMapReadDeserializeDataUpdateFailure) {
+    WGPUBuffer buffer;
+    WGPUBuffer apiBuffer;
+
+    // The client should create and serialize a ReadHandle on mapping for reading.
+    ClientReadHandle* clientHandle = ExpectReadHandleCreation();
+    ExpectReadHandleSerialization(clientHandle);
+
+    // The server should deserialize the read handle from the client and then serialize
+    // an initialization message.
+    ServerReadHandle* serverHandle = ExpectServerReadHandleDeserialize();
+
+    std::tie(apiBuffer, buffer) = CreateBuffer(WGPUBufferUsage_MapRead);
+    FlushClient();
+
+    wgpuBufferMapAsync(buffer, WGPUMapMode_Read, 0, kBufferSize, ToMockBufferMapCallback, nullptr);
+
+    // The server handle serializes a data update on the mapAsync cmd.
+    ExpectServerReadHandleSerializeDataUpdate(serverHandle);
+
+    // Mock a successful callback
+    EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Read, 0, kBufferSize, _, _))
+        .WillOnce(InvokeWithoutArgs([&]() {
+            api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Success);
+        }));
+    EXPECT_CALL(api, BufferGetConstMappedRange(apiBuffer, 0, kBufferSize))
+        .WillOnce(Return(&mBufferContent));
+
+    FlushClient();
+
+    // The client should receive the handle data update message from the server.
+    // Mock a deserialization failure.
+    MockClientReadHandleDeserializeDataUpdateFailure(clientHandle);
+
+    // Failed deserialization is a fatal failure and the client synchronously receives a
+    // DEVICE_LOST callback.
+    EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_DeviceLost, _)).Times(1);
+
+    FlushServer(false);
+
+    // The handle is destroyed once the buffer is destroyed.
+    EXPECT_CALL(clientMemoryTransferService, OnReadHandleDestroy(clientHandle)).Times(1);
+    EXPECT_CALL(serverMemoryTransferService, OnReadHandleDestroy(serverHandle)).Times(1);
+}
+
+// Test mapping for reading destroying the buffer before unmapping on the client side.
+TEST_F(WireMemoryTransferServiceTests, BufferMapReadDestroyBeforeUnmap) {
+    WGPUBuffer buffer;
+    WGPUBuffer apiBuffer;
+
+    // The client should create and serialize a ReadHandle on buffer creation.
+    ClientReadHandle* clientHandle = ExpectReadHandleCreation();
+    ExpectReadHandleSerialization(clientHandle);
+
+    // The server should deserialize the read handle from the client and then serialize
+    // an initialization message.
+    ServerReadHandle* serverHandle = ExpectServerReadHandleDeserialize();
+
+    std::tie(apiBuffer, buffer) = CreateBuffer(WGPUBufferUsage_MapRead);
+    FlushClient();
+
+    wgpuBufferMapAsync(buffer, WGPUMapMode_Read, 0, kBufferSize, ToMockBufferMapCallback, nullptr);
+
+    // The server handle serializes a data update on the mapAsync cmd.
+    ExpectServerReadHandleSerializeDataUpdate(serverHandle);
+
+    // Mock a successful callback
+    EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Read, 0, kBufferSize, _, _))
+        .WillOnce(InvokeWithoutArgs([&]() {
+            api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Success);
+        }));
+    EXPECT_CALL(clientMemoryTransferService, OnReadHandleGetData(clientHandle))
+        .WillOnce(Return(&mBufferContent));
+    EXPECT_CALL(api, BufferGetConstMappedRange(apiBuffer, 0, kBufferSize))
+        .WillOnce(Return(&mBufferContent));
+
+    FlushClient();
+
+    // The client receives a successful callback.
+    EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_Success, _)).Times(1);
+
+    // The client should receive the handle data update message from the server.
+    ExpectClientReadHandleDeserializeDataUpdate(clientHandle, &mBufferContent);
+
+    FlushServer();
+
+    // THIS IS THE TEST: destroy the buffer before unmapping and check it destroyed the mapping
+    // immediately, both in the client and server side.
+    {
+        EXPECT_CALL(clientMemoryTransferService, OnReadHandleDestroy(clientHandle)).Times(1);
+        wgpuBufferDestroy(buffer);
+
+        EXPECT_CALL(serverMemoryTransferService, OnReadHandleDestroy(serverHandle)).Times(1);
+        EXPECT_CALL(api, BufferDestroy(apiBuffer)).Times(1);
         FlushClient();
 
-        // Update the mapped contents.
-        mMappedBufferContent = mUpdatedBufferContent;
-
-        // When the client Unmaps the buffer, it will serialize data update writes to the handle.
-        ExpectClientWriteHandleSerializeDataUpdate(clientHandle);
-
+        // The handle is already destroyed so unmap only results in a server unmap call.
         wgpuBufferUnmap(buffer);
 
-        // The server deserializes the data update message.
-        ExpectServerWriteHandleDeserializeDataUpdate(serverHandle, mUpdatedBufferContent);
         EXPECT_CALL(api, BufferUnmap(apiBuffer)).Times(1);
-
         FlushClient();
+    }
+}
 
-        // The writeHandle is preserved after unmap and is destroyed once the buffer is destroyed.
+// Test successful mapping for writing.
+TEST_F(WireMemoryTransferServiceTests, BufferMapWriteSuccess) {
+    WGPUBuffer buffer;
+    WGPUBuffer apiBuffer;
+
+    ClientWriteHandle* clientHandle = ExpectWriteHandleCreation(false);
+    ExpectWriteHandleSerialization(clientHandle);
+
+    std::tie(apiBuffer, buffer) = CreateBuffer(WGPUBufferUsage_MapWrite);
+
+    // The server should then deserialize the WriteHandle from the client.
+    ServerWriteHandle* serverHandle = ExpectServerWriteHandleDeserialization();
+
+    FlushClient();
+
+    wgpuBufferMapAsync(buffer, WGPUMapMode_Write, 0, kBufferSize, ToMockBufferMapCallback, nullptr);
+
+    // Mock a successful callback.
+    EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Write, 0, kBufferSize, _, _))
+        .WillOnce(InvokeWithoutArgs([&]() {
+            api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Success);
+        }));
+    EXPECT_CALL(clientMemoryTransferService, OnWriteHandleGetData(clientHandle))
+        .WillOnce(Return(&mBufferContent));
+    EXPECT_CALL(api, BufferGetMappedRange(apiBuffer, 0, kBufferSize))
+        .WillOnce(Return(&mMappedBufferContent));
+
+    FlushClient();
+
+    // The client receives a successful callback.
+    EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_Success, _)).Times(1);
+
+    FlushServer();
+
+    // The client writes to the handle contents.
+    mMappedBufferContent = mUpdatedBufferContent;
+
+    // The client will then serialize the data update on Unmap(); the handle is destroyed later.
+    ExpectClientWriteHandleSerializeDataUpdate(clientHandle);
+
+    wgpuBufferUnmap(buffer);
+
+    // The server deserializes the data update message.
+    ExpectServerWriteHandleDeserializeDataUpdate(serverHandle, mUpdatedBufferContent);
+
+    EXPECT_CALL(api, BufferUnmap(apiBuffer)).Times(1);
+
+    FlushClient();
+
+    // The handle is destroyed once the buffer is destroyed.
+    EXPECT_CALL(clientMemoryTransferService, OnWriteHandleDestroy(clientHandle)).Times(1);
+    EXPECT_CALL(serverMemoryTransferService, OnWriteHandleDestroy(serverHandle)).Times(1);
+}
+
+// Test WriteHandle destroy behavior
+TEST_F(WireMemoryTransferServiceTests, BufferMapWriteDestroy) {
+    WGPUBuffer buffer;
+    WGPUBuffer apiBuffer;
+
+    ClientWriteHandle* clientHandle = ExpectWriteHandleCreation(false);
+    ExpectWriteHandleSerialization(clientHandle);
+
+    std::tie(apiBuffer, buffer) = CreateBuffer(WGPUBufferUsage_MapWrite);
+
+    // The server should then deserialize the WriteHandle from the client.
+    ServerWriteHandle* serverHandle = ExpectServerWriteHandleDeserialization();
+
+    FlushClient();
+
+    // The handle is destroyed once the buffer is destroyed.
+    EXPECT_CALL(clientMemoryTransferService, OnWriteHandleDestroy(clientHandle)).Times(1);
+    wgpuBufferDestroy(buffer);
+    EXPECT_CALL(serverMemoryTransferService, OnWriteHandleDestroy(serverHandle)).Times(1);
+    EXPECT_CALL(api, BufferDestroy(apiBuffer)).Times(1);
+
+    FlushClient();
+}
+
+// Test unsuccessful MapWrite.
+TEST_F(WireMemoryTransferServiceTests, BufferMapWriteError) {
+    WGPUBuffer buffer;
+    WGPUBuffer apiBuffer;
+
+    // The client should create and serialize a WriteHandle on buffer creation with MapWrite
+    // usage.
+    ClientWriteHandle* clientHandle = ExpectWriteHandleCreation(false);
+    ExpectWriteHandleSerialization(clientHandle);
+
+    std::tie(apiBuffer, buffer) = CreateBuffer(WGPUBufferUsage_MapWrite);
+
+    // The server should then deserialize the WriteHandle from the client.
+    ServerWriteHandle* serverHandle = ExpectServerWriteHandleDeserialization();
+
+    FlushClient();
+
+    wgpuBufferMapAsync(buffer, WGPUMapMode_Write, 0, kBufferSize, ToMockBufferMapCallback, nullptr);
+
+    // Mock an error callback.
+    EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Write, 0, kBufferSize, _, _))
+        .WillOnce(InvokeWithoutArgs(
+            [&]() { api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Error); }));
+
+    FlushClient();
+
+    // The client receives an error callback.
+    EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_Error, _)).Times(1);
+
+    FlushServer();
+
+    wgpuBufferUnmap(buffer);
+
+    EXPECT_CALL(api, BufferUnmap(apiBuffer)).Times(1);
+
+    FlushClient();
+
+    // The handle is destroyed once the buffer is destroyed.
+    EXPECT_CALL(clientMemoryTransferService, OnWriteHandleDestroy(clientHandle)).Times(1);
+    EXPECT_CALL(serverMemoryTransferService, OnWriteHandleDestroy(serverHandle)).Times(1);
+}
+
+// Test WriteHandle creation failure.
+TEST_F(WireMemoryTransferServiceTests, BufferMapWriteHandleCreationFailure) {
+    // Mock a WriteHandle creation failure
+    MockWriteHandleCreationFailure();
+
+    WGPUBufferDescriptor descriptor = {};
+    descriptor.size = kBufferSize;
+    descriptor.usage = WGPUBufferUsage_MapWrite;
+
+    wgpuDeviceCreateBuffer(device, &descriptor);
+}
+
+// Test MapWrite DeserializeWriteHandle failure.
+TEST_F(WireMemoryTransferServiceTests, BufferMapWriteDeserializeWriteHandleFailure) {
+    WGPUBuffer buffer;
+    WGPUBuffer apiBuffer;
+
+    // The client should create and serialize a WriteHandle on buffer creation with MapWrite
+    // usage.
+    ClientWriteHandle* clientHandle = ExpectWriteHandleCreation(false);
+    ExpectWriteHandleSerialization(clientHandle);
+
+    std::tie(apiBuffer, buffer) = CreateBuffer(WGPUBufferUsage_MapWrite);
+
+    // Mock a deserialization failure.
+    MockServerWriteHandleDeserializeFailure();
+
+    FlushClient(false);
+
+    // The handle is destroyed once the buffer is destroyed.
+    EXPECT_CALL(clientMemoryTransferService, OnWriteHandleDestroy(clientHandle)).Times(1);
+}
+
+// Test MapWrite DeserializeDataUpdate failure.
+TEST_F(WireMemoryTransferServiceTests, BufferMapWriteDeserializeDataUpdateFailure) {
+    WGPUBuffer buffer;
+    WGPUBuffer apiBuffer;
+
+    ClientWriteHandle* clientHandle = ExpectWriteHandleCreation(false);
+    ExpectWriteHandleSerialization(clientHandle);
+
+    std::tie(apiBuffer, buffer) = CreateBuffer(WGPUBufferUsage_MapWrite);
+
+    // The server should then deserialize the WriteHandle from the client.
+    ServerWriteHandle* serverHandle = ExpectServerWriteHandleDeserialization();
+
+    FlushClient();
+
+    wgpuBufferMapAsync(buffer, WGPUMapMode_Write, 0, kBufferSize, ToMockBufferMapCallback, nullptr);
+
+    // Mock a successful callback.
+    EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Write, 0, kBufferSize, _, _))
+        .WillOnce(InvokeWithoutArgs([&]() {
+            api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Success);
+        }));
+    EXPECT_CALL(clientMemoryTransferService, OnWriteHandleGetData(clientHandle))
+        .WillOnce(Return(&mBufferContent));
+    EXPECT_CALL(api, BufferGetMappedRange(apiBuffer, 0, kBufferSize))
+        .WillOnce(Return(&mMappedBufferContent));
+
+    FlushClient();
+
+    // The client receives a success callback.
+    EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_Success, _)).Times(1);
+
+    FlushServer();
+
+    // The client writes to the handle contents.
+    mMappedBufferContent = mUpdatedBufferContent;
+
+    // The client will then serialize the data update on Unmap().
+    ExpectClientWriteHandleSerializeDataUpdate(clientHandle);
+
+    wgpuBufferUnmap(buffer);
+
+    // The server deserializes the data update message. Mock a deserialization failure.
+    MockServerWriteHandleDeserializeDataUpdateFailure(serverHandle);
+
+    FlushClient(false);
+
+    // The handle is destroyed once the buffer is destroyed.
+    EXPECT_CALL(clientMemoryTransferService, OnWriteHandleDestroy(clientHandle)).Times(1);
+    EXPECT_CALL(serverMemoryTransferService, OnWriteHandleDestroy(serverHandle)).Times(1);
+}
+
+// Test MapWrite destroying the buffer before unmapping on the client side.
+TEST_F(WireMemoryTransferServiceTests, BufferMapWriteDestroyBeforeUnmap) {
+    WGPUBuffer buffer;
+    WGPUBuffer apiBuffer;
+
+    ClientWriteHandle* clientHandle = ExpectWriteHandleCreation(false);
+    ExpectWriteHandleSerialization(clientHandle);
+
+    std::tie(apiBuffer, buffer) = CreateBuffer(WGPUBufferUsage_MapWrite);
+
+    // The server should then deserialize the WriteHandle from the client.
+    ServerWriteHandle* serverHandle = ExpectServerWriteHandleDeserialization();
+
+    FlushClient();
+
+    wgpuBufferMapAsync(buffer, WGPUMapMode_Write, 0, kBufferSize, ToMockBufferMapCallback, nullptr);
+
+    // Mock a successful callback.
+    EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Write, 0, kBufferSize, _, _))
+        .WillOnce(InvokeWithoutArgs([&]() {
+            api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Success);
+        }));
+    EXPECT_CALL(clientMemoryTransferService, OnWriteHandleGetData(clientHandle))
+        .WillOnce(Return(&mBufferContent));
+    EXPECT_CALL(api, BufferGetMappedRange(apiBuffer, 0, kBufferSize))
+        .WillOnce(Return(&mMappedBufferContent));
+
+    FlushClient();
+
+    // The client receives a successful callback.
+    EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_Success, _)).Times(1);
+
+    FlushServer();
+
+    // The client writes to the handle contents.
+    mMappedBufferContent = mUpdatedBufferContent;
+
+    // THIS IS THE TEST: destroy the buffer before unmapping and check it destroyed the mapping
+    // immediately, both in the client and server side.
+    {
+        // The handle is destroyed once the buffer is destroyed.
         EXPECT_CALL(clientMemoryTransferService, OnWriteHandleDestroy(clientHandle)).Times(1);
+
+        wgpuBufferDestroy(buffer);
+
         EXPECT_CALL(serverMemoryTransferService, OnWriteHandleDestroy(serverHandle)).Times(1);
+        EXPECT_CALL(api, BufferDestroy(apiBuffer)).Times(1);
+        FlushClient();
+
+        // The handle is already destroyed so unmap only results in a server unmap call.
+        wgpuBufferUnmap(buffer);
+
+        EXPECT_CALL(api, BufferUnmap(apiBuffer)).Times(1);
+        FlushClient();
     }
+}
+
+// Test successful buffer creation with mappedAtCreation = true.
+TEST_F(WireMemoryTransferServiceTests, MappedAtCreationSuccess) {
+    // The client should create and serialize a WriteHandle on createBufferMapped.
+    ClientWriteHandle* clientHandle = ExpectWriteHandleCreation(true);
+    ExpectWriteHandleSerialization(clientHandle);
+
+    // The server should then deserialize the WriteHandle from the client.
+    ServerWriteHandle* serverHandle = ExpectServerWriteHandleDeserialization();
+
+    WGPUBuffer buffer;
+    WGPUBuffer apiBuffer;
+    std::tie(apiBuffer, buffer) = CreateBufferMapped();
+    FlushClient();
+
+    // Update the mapped contents.
+    mMappedBufferContent = mUpdatedBufferContent;
+
+    // When the client Unmaps the buffer, it will serialize data update writes to the handle and
+    // destroy it.
+    ExpectClientWriteHandleSerializeDataUpdate(clientHandle);
+    EXPECT_CALL(clientMemoryTransferService, OnWriteHandleDestroy(clientHandle)).Times(1);
+
+    wgpuBufferUnmap(buffer);
+
+    // The server deserializes the data update message.
+    ExpectServerWriteHandleDeserializeDataUpdate(serverHandle, mUpdatedBufferContent);
+
+    // After the handle is updated it can be destroyed.
+    EXPECT_CALL(serverMemoryTransferService, OnWriteHandleDestroy(serverHandle)).Times(1);
+    EXPECT_CALL(api, BufferUnmap(apiBuffer)).Times(1);
+
+    FlushClient();
+}
+
+// Test buffer creation with mappedAtCreation WriteHandle creation failure.
+TEST_F(WireMemoryTransferServiceTests, MappedAtCreationWriteHandleCreationFailure) {
+    // Mock a WriteHandle creation failure
+    MockWriteHandleCreationFailure();
+
+    WGPUBufferDescriptor descriptor = {};
+    descriptor.size = sizeof(mBufferContent);
+    descriptor.mappedAtCreation = true;
+
+    WGPUBuffer buffer = wgpuDeviceCreateBuffer(device, &descriptor);
+    EXPECT_EQ(nullptr, wgpuBufferGetMappedRange(buffer, 0, sizeof(mBufferContent)));
+}
+
+// Test buffer creation with mappedAtCreation DeserializeWriteHandle failure.
+TEST_F(WireMemoryTransferServiceTests, MappedAtCreationDeserializeWriteHandleFailure) {
+    // The client should create and serialize a WriteHandle on createBufferMapped.
+    ClientWriteHandle* clientHandle = ExpectWriteHandleCreation(true);
+    ExpectWriteHandleSerialization(clientHandle);
+
+    // The server should then deserialize the WriteHandle from the client.
+    MockServerWriteHandleDeserializeFailure();
+
+    WGPUBufferDescriptor descriptor = {};
+    descriptor.size = sizeof(mBufferContent);
+    descriptor.mappedAtCreation = true;
+
+    WGPUBuffer apiBuffer = api.GetNewBuffer();
+
+    wgpuDeviceCreateBuffer(device, &descriptor);
+
+    EXPECT_CALL(api, DeviceCreateBuffer(apiDevice, _)).WillOnce(Return(apiBuffer));
+    // Note: BufferGetMappedRange is not called if deserializing the WriteHandle fails.
+
+    FlushClient(false);
+
+    EXPECT_CALL(clientMemoryTransferService, OnWriteHandleDestroy(clientHandle)).Times(1);
+}
+
+// Test buffer creation with mappedAtCreation = true DeserializeDataUpdate failure.
+TEST_F(WireMemoryTransferServiceTests, MappedAtCreationDeserializeDataUpdateFailure) {
+    // The client should create and serialize a WriteHandle on createBufferMapped.
+    ClientWriteHandle* clientHandle = ExpectWriteHandleCreation(true);
+    ExpectWriteHandleSerialization(clientHandle);
+
+    // The server should then deserialize the WriteHandle from the client.
+    ServerWriteHandle* serverHandle = ExpectServerWriteHandleDeserialization();
+
+    WGPUBuffer buffer;
+    WGPUBuffer apiBuffer;
+    std::tie(apiBuffer, buffer) = CreateBufferMapped();
+    FlushClient();
+
+    // Update the mapped contents.
+    mMappedBufferContent = mUpdatedBufferContent;
+
+    // When the client Unmaps the buffer, it will serialize data update writes to the handle and
+    // destroy it.
+    ExpectClientWriteHandleSerializeDataUpdate(clientHandle);
+    EXPECT_CALL(clientMemoryTransferService, OnWriteHandleDestroy(clientHandle)).Times(1);
+
+    wgpuBufferUnmap(buffer);
+
+    // The server deserializes the data update message. Mock a deserialization failure.
+    MockServerWriteHandleDeserializeDataUpdateFailure(serverHandle);
+
+    FlushClient(false);
+
+    // Failed BufferUpdateMappedData cmd will early return so BufferUnmap is not processed.
+    // The server side writeHandle is destructed at buffer destruction.
+    EXPECT_CALL(serverMemoryTransferService, OnWriteHandleDestroy(serverHandle)).Times(1);
+}
+
+// Test mappedAtCreation=true destroying the buffer before unmapping on the client side.
+TEST_F(WireMemoryTransferServiceTests, MappedAtCreationDestroyBeforeUnmap) {
+    // The client should create and serialize a WriteHandle on createBufferMapped.
+    ClientWriteHandle* clientHandle = ExpectWriteHandleCreation(true);
+    ExpectWriteHandleSerialization(clientHandle);
+
+    // The server should then deserialize the WriteHandle from the client.
+    ServerWriteHandle* serverHandle = ExpectServerWriteHandleDeserialization();
+
+    WGPUBuffer buffer;
+    WGPUBuffer apiBuffer;
+    std::tie(apiBuffer, buffer) = CreateBufferMapped();
+    FlushClient();
+
+    // Update the mapped contents.
+    mMappedBufferContent = mUpdatedBufferContent;
+
+    // THIS IS THE TEST: destroy the buffer before unmapping and check it destroyed the mapping
+    // immediately, both in the client and server side.
+    {
+        EXPECT_CALL(clientMemoryTransferService, OnWriteHandleDestroy(clientHandle)).Times(1);
+        wgpuBufferDestroy(buffer);
+
+        EXPECT_CALL(serverMemoryTransferService, OnWriteHandleDestroy(serverHandle)).Times(1);
+        EXPECT_CALL(api, BufferDestroy(apiBuffer)).Times(1);
+        FlushClient();
+
+        // The handle is already destroyed so unmap only results in a server unmap call.
+        wgpuBufferUnmap(buffer);
+
+        EXPECT_CALL(api, BufferUnmap(apiBuffer)).Times(1);
+        FlushClient();
+    }
+}
+
+// Test that a buffer with mappedAtCreation and MapRead usage destroys the WriteHandle on unmap
+// and switches the data pointer to the ReadHandle.
+TEST_F(WireMemoryTransferServiceTests, MappedAtCreationAndMapReadSuccess) {
+    // The client should create and serialize a ReadHandle and a WriteHandle on
+    // createBufferMapped.
+    ClientReadHandle* clientReadHandle = ExpectReadHandleCreation();
+    ExpectReadHandleSerialization(clientReadHandle);
+    ClientWriteHandle* clientWriteHandle = ExpectWriteHandleCreation(true);
+    ExpectWriteHandleSerialization(clientWriteHandle);
+
+    // The server should then deserialize a ReadHandle and a WriteHandle from the client.
+    ServerReadHandle* serverReadHandle = ExpectServerReadHandleDeserialize();
+    ServerWriteHandle* serverWriteHandle = ExpectServerWriteHandleDeserialization();
+
+    WGPUBuffer buffer;
+    WGPUBuffer apiBuffer;
+    std::tie(apiBuffer, buffer) = CreateBufferMapped(WGPUBufferUsage_MapRead);
+    FlushClient();
+
+    // Update the mapped contents.
+    mMappedBufferContent = mUpdatedBufferContent;
+
+    // When the client Unmaps the buffer, it will serialize data update writes to the handle and
+    // destroy it.
+    ExpectClientWriteHandleSerializeDataUpdate(clientWriteHandle);
+    EXPECT_CALL(clientMemoryTransferService, OnWriteHandleDestroy(clientWriteHandle)).Times(1);
+    EXPECT_CALL(clientMemoryTransferService, OnReadHandleGetData(clientReadHandle))
+        .WillOnce(Return(&mBufferContent));
+    wgpuBufferUnmap(buffer);
+
+    // The server deserializes the data update message.
+    ExpectServerWriteHandleDeserializeDataUpdate(serverWriteHandle, mUpdatedBufferContent);
+    EXPECT_CALL(api, BufferUnmap(apiBuffer)).Times(1);
+    EXPECT_CALL(serverMemoryTransferService, OnWriteHandleDestroy(serverWriteHandle)).Times(1);
+    FlushClient();
+
+    // The ReadHandle will be destroyed on buffer destroy.
+    EXPECT_CALL(clientMemoryTransferService, OnReadHandleDestroy(clientReadHandle)).Times(1);
+    EXPECT_CALL(serverMemoryTransferService, OnReadHandleDestroy(serverReadHandle)).Times(1);
+}
+
+// Test that the WriteHandle is preserved after unmap for a buffer with mappedAtCreation and
+TEST_F(WireMemoryTransferServiceTests, MappedAtCreationAndMapWriteSuccess) {
+    // The client should create and serialize a WriteHandle on createBufferMapped.
+    ClientWriteHandle* clientHandle = ExpectWriteHandleCreation(true);
+
+    ExpectWriteHandleSerialization(clientHandle);
+
+    // The server should then deserialize the WriteHandle from the client.
+    ServerWriteHandle* serverHandle = ExpectServerWriteHandleDeserialization();
+
+    WGPUBuffer buffer;
+    WGPUBuffer apiBuffer;
+    std::tie(apiBuffer, buffer) = CreateBufferMapped(WGPUBufferUsage_MapWrite);
+    FlushClient();
+
+    // Update the mapped contents.
+    mMappedBufferContent = mUpdatedBufferContent;
+
+    // When the client Unmaps the buffer, it will serialize data update writes to the handle.
+    ExpectClientWriteHandleSerializeDataUpdate(clientHandle);
+
+    wgpuBufferUnmap(buffer);
+
+    // The server deserializes the data update message.
+    ExpectServerWriteHandleDeserializeDataUpdate(serverHandle, mUpdatedBufferContent);
+    EXPECT_CALL(api, BufferUnmap(apiBuffer)).Times(1);
+
+    FlushClient();
+
+    // The writeHandle is preserved after unmap and is destroyed once the buffer is destroyed.
+    EXPECT_CALL(clientMemoryTransferService, OnWriteHandleDestroy(clientHandle)).Times(1);
+    EXPECT_CALL(serverMemoryTransferService, OnWriteHandleDestroy(serverHandle)).Times(1);
+}
 
 }  // namespace dawn::wire
diff --git a/src/dawn/tests/unittests/wire/WireOptionalTests.cpp b/src/dawn/tests/unittests/wire/WireOptionalTests.cpp
index b95a588..2061021 100644
--- a/src/dawn/tests/unittests/wire/WireOptionalTests.cpp
+++ b/src/dawn/tests/unittests/wire/WireOptionalTests.cpp
@@ -16,171 +16,167 @@
 
 namespace dawn::wire {
 
-    using testing::_;
-    using testing::Return;
+using testing::_;
+using testing::Return;
 
-    class WireOptionalTests : public WireTest {
-      public:
-        WireOptionalTests() {
-        }
-        ~WireOptionalTests() override = default;
-    };
+class WireOptionalTests : public WireTest {
+  public:
+    WireOptionalTests() {}
+    ~WireOptionalTests() override = default;
+};
 
-    // Test passing nullptr instead of objects - object as value version
-    TEST_F(WireOptionalTests, OptionalObjectValue) {
-        WGPUBindGroupLayoutDescriptor bglDesc = {};
-        bglDesc.entryCount = 0;
-        WGPUBindGroupLayout bgl = wgpuDeviceCreateBindGroupLayout(device, &bglDesc);
+// Test passing nullptr instead of objects - object as value version
+TEST_F(WireOptionalTests, OptionalObjectValue) {
+    WGPUBindGroupLayoutDescriptor bglDesc = {};
+    bglDesc.entryCount = 0;
+    WGPUBindGroupLayout bgl = wgpuDeviceCreateBindGroupLayout(device, &bglDesc);
 
-        WGPUBindGroupLayout apiBindGroupLayout = api.GetNewBindGroupLayout();
-        EXPECT_CALL(api, DeviceCreateBindGroupLayout(apiDevice, _))
-            .WillOnce(Return(apiBindGroupLayout));
+    WGPUBindGroupLayout apiBindGroupLayout = api.GetNewBindGroupLayout();
+    EXPECT_CALL(api, DeviceCreateBindGroupLayout(apiDevice, _))
+        .WillOnce(Return(apiBindGroupLayout));
 
-        // The `sampler`, `textureView` and `buffer` members of a binding are optional.
-        WGPUBindGroupEntry entry;
-        entry.binding = 0;
-        entry.sampler = nullptr;
-        entry.textureView = nullptr;
-        entry.buffer = nullptr;
-        entry.nextInChain = nullptr;
+    // The `sampler`, `textureView` and `buffer` members of a binding are optional.
+    WGPUBindGroupEntry entry;
+    entry.binding = 0;
+    entry.sampler = nullptr;
+    entry.textureView = nullptr;
+    entry.buffer = nullptr;
+    entry.nextInChain = nullptr;
 
-        WGPUBindGroupDescriptor bgDesc = {};
-        bgDesc.layout = bgl;
-        bgDesc.entryCount = 1;
-        bgDesc.entries = &entry;
+    WGPUBindGroupDescriptor bgDesc = {};
+    bgDesc.layout = bgl;
+    bgDesc.entryCount = 1;
+    bgDesc.entries = &entry;
 
-        wgpuDeviceCreateBindGroup(device, &bgDesc);
+    wgpuDeviceCreateBindGroup(device, &bgDesc);
 
-        WGPUBindGroup apiPlaceholderBindGroup = api.GetNewBindGroup();
-        EXPECT_CALL(api,
-                    DeviceCreateBindGroup(
-                        apiDevice, MatchesLambda([](const WGPUBindGroupDescriptor* desc) -> bool {
-                            return desc->nextInChain == nullptr && desc->entryCount == 1 &&
-                                   desc->entries[0].binding == 0 &&
-                                   desc->entries[0].sampler == nullptr &&
-                                   desc->entries[0].buffer == nullptr &&
-                                   desc->entries[0].textureView == nullptr;
-                        })))
-            .WillOnce(Return(apiPlaceholderBindGroup));
+    WGPUBindGroup apiPlaceholderBindGroup = api.GetNewBindGroup();
+    EXPECT_CALL(api, DeviceCreateBindGroup(
+                         apiDevice, MatchesLambda([](const WGPUBindGroupDescriptor* desc) -> bool {
+                             return desc->nextInChain == nullptr && desc->entryCount == 1 &&
+                                    desc->entries[0].binding == 0 &&
+                                    desc->entries[0].sampler == nullptr &&
+                                    desc->entries[0].buffer == nullptr &&
+                                    desc->entries[0].textureView == nullptr;
+                         })))
+        .WillOnce(Return(apiPlaceholderBindGroup));
 
-        FlushClient();
-    }
+    FlushClient();
+}
 
-    // Test that the wire is able to send optional pointers to structures
-    TEST_F(WireOptionalTests, OptionalStructPointer) {
-        // Create shader module
-        WGPUShaderModuleDescriptor vertexDescriptor = {};
-        WGPUShaderModule vsModule = wgpuDeviceCreateShaderModule(device, &vertexDescriptor);
-        WGPUShaderModule apiVsModule = api.GetNewShaderModule();
-        EXPECT_CALL(api, DeviceCreateShaderModule(apiDevice, _)).WillOnce(Return(apiVsModule));
+// Test that the wire is able to send optional pointers to structures
+TEST_F(WireOptionalTests, OptionalStructPointer) {
+    // Create shader module
+    WGPUShaderModuleDescriptor vertexDescriptor = {};
+    WGPUShaderModule vsModule = wgpuDeviceCreateShaderModule(device, &vertexDescriptor);
+    WGPUShaderModule apiVsModule = api.GetNewShaderModule();
+    EXPECT_CALL(api, DeviceCreateShaderModule(apiDevice, _)).WillOnce(Return(apiVsModule));
 
-        // Create the color state descriptor
-        WGPUBlendComponent blendComponent = {};
-        blendComponent.operation = WGPUBlendOperation_Add;
-        blendComponent.srcFactor = WGPUBlendFactor_One;
-        blendComponent.dstFactor = WGPUBlendFactor_One;
-        WGPUBlendState blendState = {};
-        blendState.alpha = blendComponent;
-        blendState.color = blendComponent;
-        WGPUColorTargetState colorTargetState = {};
-        colorTargetState.format = WGPUTextureFormat_RGBA8Unorm;
-        colorTargetState.blend = &blendState;
-        colorTargetState.writeMask = WGPUColorWriteMask_All;
+    // Create the color state descriptor
+    WGPUBlendComponent blendComponent = {};
+    blendComponent.operation = WGPUBlendOperation_Add;
+    blendComponent.srcFactor = WGPUBlendFactor_One;
+    blendComponent.dstFactor = WGPUBlendFactor_One;
+    WGPUBlendState blendState = {};
+    blendState.alpha = blendComponent;
+    blendState.color = blendComponent;
+    WGPUColorTargetState colorTargetState = {};
+    colorTargetState.format = WGPUTextureFormat_RGBA8Unorm;
+    colorTargetState.blend = &blendState;
+    colorTargetState.writeMask = WGPUColorWriteMask_All;
 
-        // Create the depth-stencil state
-        WGPUStencilFaceState stencilFace = {};
-        stencilFace.compare = WGPUCompareFunction_Always;
-        stencilFace.failOp = WGPUStencilOperation_Keep;
-        stencilFace.depthFailOp = WGPUStencilOperation_Keep;
-        stencilFace.passOp = WGPUStencilOperation_Keep;
+    // Create the depth-stencil state
+    WGPUStencilFaceState stencilFace = {};
+    stencilFace.compare = WGPUCompareFunction_Always;
+    stencilFace.failOp = WGPUStencilOperation_Keep;
+    stencilFace.depthFailOp = WGPUStencilOperation_Keep;
+    stencilFace.passOp = WGPUStencilOperation_Keep;
 
-        WGPUDepthStencilState depthStencilState = {};
-        depthStencilState.format = WGPUTextureFormat_Depth24PlusStencil8;
-        depthStencilState.depthWriteEnabled = false;
-        depthStencilState.depthCompare = WGPUCompareFunction_Always;
-        depthStencilState.stencilBack = stencilFace;
-        depthStencilState.stencilFront = stencilFace;
-        depthStencilState.stencilReadMask = 0xff;
-        depthStencilState.stencilWriteMask = 0xff;
-        depthStencilState.depthBias = 0;
-        depthStencilState.depthBiasSlopeScale = 0.0;
-        depthStencilState.depthBiasClamp = 0.0;
+    WGPUDepthStencilState depthStencilState = {};
+    depthStencilState.format = WGPUTextureFormat_Depth24PlusStencil8;
+    depthStencilState.depthWriteEnabled = false;
+    depthStencilState.depthCompare = WGPUCompareFunction_Always;
+    depthStencilState.stencilBack = stencilFace;
+    depthStencilState.stencilFront = stencilFace;
+    depthStencilState.stencilReadMask = 0xff;
+    depthStencilState.stencilWriteMask = 0xff;
+    depthStencilState.depthBias = 0;
+    depthStencilState.depthBiasSlopeScale = 0.0;
+    depthStencilState.depthBiasClamp = 0.0;
 
-        // Create the pipeline layout
-        WGPUPipelineLayoutDescriptor layoutDescriptor = {};
-        layoutDescriptor.bindGroupLayoutCount = 0;
-        layoutDescriptor.bindGroupLayouts = nullptr;
-        WGPUPipelineLayout layout = wgpuDeviceCreatePipelineLayout(device, &layoutDescriptor);
-        WGPUPipelineLayout apiLayout = api.GetNewPipelineLayout();
-        EXPECT_CALL(api, DeviceCreatePipelineLayout(apiDevice, _)).WillOnce(Return(apiLayout));
+    // Create the pipeline layout
+    WGPUPipelineLayoutDescriptor layoutDescriptor = {};
+    layoutDescriptor.bindGroupLayoutCount = 0;
+    layoutDescriptor.bindGroupLayouts = nullptr;
+    WGPUPipelineLayout layout = wgpuDeviceCreatePipelineLayout(device, &layoutDescriptor);
+    WGPUPipelineLayout apiLayout = api.GetNewPipelineLayout();
+    EXPECT_CALL(api, DeviceCreatePipelineLayout(apiDevice, _)).WillOnce(Return(apiLayout));
 
-        // Create pipeline
-        WGPURenderPipelineDescriptor pipelineDescriptor = {};
+    // Create pipeline
+    WGPURenderPipelineDescriptor pipelineDescriptor = {};
 
-        pipelineDescriptor.vertex.module = vsModule;
-        pipelineDescriptor.vertex.entryPoint = "main";
-        pipelineDescriptor.vertex.bufferCount = 0;
-        pipelineDescriptor.vertex.buffers = nullptr;
+    pipelineDescriptor.vertex.module = vsModule;
+    pipelineDescriptor.vertex.entryPoint = "main";
+    pipelineDescriptor.vertex.bufferCount = 0;
+    pipelineDescriptor.vertex.buffers = nullptr;
 
-        WGPUFragmentState fragment = {};
-        fragment.module = vsModule;
-        fragment.entryPoint = "main";
-        fragment.targetCount = 1;
-        fragment.targets = &colorTargetState;
-        pipelineDescriptor.fragment = &fragment;
+    WGPUFragmentState fragment = {};
+    fragment.module = vsModule;
+    fragment.entryPoint = "main";
+    fragment.targetCount = 1;
+    fragment.targets = &colorTargetState;
+    pipelineDescriptor.fragment = &fragment;
 
-        pipelineDescriptor.multisample.count = 1;
-        pipelineDescriptor.multisample.mask = 0xFFFFFFFF;
-        pipelineDescriptor.multisample.alphaToCoverageEnabled = false;
-        pipelineDescriptor.layout = layout;
-        pipelineDescriptor.primitive.topology = WGPUPrimitiveTopology_TriangleList;
-        pipelineDescriptor.primitive.frontFace = WGPUFrontFace_CCW;
-        pipelineDescriptor.primitive.cullMode = WGPUCullMode_None;
+    pipelineDescriptor.multisample.count = 1;
+    pipelineDescriptor.multisample.mask = 0xFFFFFFFF;
+    pipelineDescriptor.multisample.alphaToCoverageEnabled = false;
+    pipelineDescriptor.layout = layout;
+    pipelineDescriptor.primitive.topology = WGPUPrimitiveTopology_TriangleList;
+    pipelineDescriptor.primitive.frontFace = WGPUFrontFace_CCW;
+    pipelineDescriptor.primitive.cullMode = WGPUCullMode_None;
 
-        // First case: depthStencil is not null.
-        pipelineDescriptor.depthStencil = &depthStencilState;
-        wgpuDeviceCreateRenderPipeline(device, &pipelineDescriptor);
+    // First case: depthStencil is not null.
+    pipelineDescriptor.depthStencil = &depthStencilState;
+    wgpuDeviceCreateRenderPipeline(device, &pipelineDescriptor);
 
-        WGPURenderPipeline apiPlaceholderPipeline = api.GetNewRenderPipeline();
-        EXPECT_CALL(
-            api,
-            DeviceCreateRenderPipeline(
-                apiDevice, MatchesLambda([](const WGPURenderPipelineDescriptor* desc) -> bool {
-                    return desc->depthStencil != nullptr &&
-                           desc->depthStencil->nextInChain == nullptr &&
-                           desc->depthStencil->depthWriteEnabled == false &&
-                           desc->depthStencil->depthCompare == WGPUCompareFunction_Always &&
-                           desc->depthStencil->stencilBack.compare == WGPUCompareFunction_Always &&
-                           desc->depthStencil->stencilBack.failOp == WGPUStencilOperation_Keep &&
-                           desc->depthStencil->stencilBack.depthFailOp ==
-                               WGPUStencilOperation_Keep &&
-                           desc->depthStencil->stencilBack.passOp == WGPUStencilOperation_Keep &&
-                           desc->depthStencil->stencilFront.compare == WGPUCompareFunction_Always &&
-                           desc->depthStencil->stencilFront.failOp == WGPUStencilOperation_Keep &&
-                           desc->depthStencil->stencilFront.depthFailOp ==
-                               WGPUStencilOperation_Keep &&
-                           desc->depthStencil->stencilFront.passOp == WGPUStencilOperation_Keep &&
-                           desc->depthStencil->stencilReadMask == 0xff &&
-                           desc->depthStencil->stencilWriteMask == 0xff &&
-                           desc->depthStencil->depthBias == 0 &&
-                           desc->depthStencil->depthBiasSlopeScale == 0.0 &&
-                           desc->depthStencil->depthBiasClamp == 0.0;
-                })))
-            .WillOnce(Return(apiPlaceholderPipeline));
+    WGPURenderPipeline apiPlaceholderPipeline = api.GetNewRenderPipeline();
+    EXPECT_CALL(
+        api,
+        DeviceCreateRenderPipeline(
+            apiDevice, MatchesLambda([](const WGPURenderPipelineDescriptor* desc) -> bool {
+                return desc->depthStencil != nullptr &&
+                       desc->depthStencil->nextInChain == nullptr &&
+                       desc->depthStencil->depthWriteEnabled == false &&
+                       desc->depthStencil->depthCompare == WGPUCompareFunction_Always &&
+                       desc->depthStencil->stencilBack.compare == WGPUCompareFunction_Always &&
+                       desc->depthStencil->stencilBack.failOp == WGPUStencilOperation_Keep &&
+                       desc->depthStencil->stencilBack.depthFailOp == WGPUStencilOperation_Keep &&
+                       desc->depthStencil->stencilBack.passOp == WGPUStencilOperation_Keep &&
+                       desc->depthStencil->stencilFront.compare == WGPUCompareFunction_Always &&
+                       desc->depthStencil->stencilFront.failOp == WGPUStencilOperation_Keep &&
+                       desc->depthStencil->stencilFront.depthFailOp == WGPUStencilOperation_Keep &&
+                       desc->depthStencil->stencilFront.passOp == WGPUStencilOperation_Keep &&
+                       desc->depthStencil->stencilReadMask == 0xff &&
+                       desc->depthStencil->stencilWriteMask == 0xff &&
+                       desc->depthStencil->depthBias == 0 &&
+                       desc->depthStencil->depthBiasSlopeScale == 0.0 &&
+                       desc->depthStencil->depthBiasClamp == 0.0;
+            })))
+        .WillOnce(Return(apiPlaceholderPipeline));
 
-        FlushClient();
+    FlushClient();
 
-        // Second case: depthStencil is null.
-        pipelineDescriptor.depthStencil = nullptr;
-        wgpuDeviceCreateRenderPipeline(device, &pipelineDescriptor);
-        EXPECT_CALL(
-            api, DeviceCreateRenderPipeline(
-                     apiDevice, MatchesLambda([](const WGPURenderPipelineDescriptor* desc) -> bool {
-                         return desc->depthStencil == nullptr;
-                     })))
-            .WillOnce(Return(apiPlaceholderPipeline));
+    // Second case: depthStencil is null.
+    pipelineDescriptor.depthStencil = nullptr;
+    wgpuDeviceCreateRenderPipeline(device, &pipelineDescriptor);
+    EXPECT_CALL(api,
+                DeviceCreateRenderPipeline(
+                    apiDevice, MatchesLambda([](const WGPURenderPipelineDescriptor* desc) -> bool {
+                        return desc->depthStencil == nullptr;
+                    })))
+        .WillOnce(Return(apiPlaceholderPipeline));
 
-        FlushClient();
-    }
+    FlushClient();
+}
 
 }  // namespace dawn::wire
diff --git a/src/dawn/tests/unittests/wire/WireQueueTests.cpp b/src/dawn/tests/unittests/wire/WireQueueTests.cpp
index 536ad77..7e2d677 100644
--- a/src/dawn/tests/unittests/wire/WireQueueTests.cpp
+++ b/src/dawn/tests/unittests/wire/WireQueueTests.cpp
@@ -19,129 +19,128 @@
 
 namespace dawn::wire {
 
-    using testing::_;
-    using testing::InvokeWithoutArgs;
-    using testing::Mock;
+using testing::_;
+using testing::InvokeWithoutArgs;
+using testing::Mock;
 
-    class MockQueueWorkDoneCallback {
-      public:
-        MOCK_METHOD(void, Call, (WGPUQueueWorkDoneStatus status, void* userdata));
-    };
+class MockQueueWorkDoneCallback {
+  public:
+    MOCK_METHOD(void, Call, (WGPUQueueWorkDoneStatus status, void* userdata));
+};
 
-    static std::unique_ptr<MockQueueWorkDoneCallback> mockQueueWorkDoneCallback;
-    static void ToMockQueueWorkDone(WGPUQueueWorkDoneStatus status, void* userdata) {
-        mockQueueWorkDoneCallback->Call(status, userdata);
+static std::unique_ptr<MockQueueWorkDoneCallback> mockQueueWorkDoneCallback;
+static void ToMockQueueWorkDone(WGPUQueueWorkDoneStatus status, void* userdata) {
+    mockQueueWorkDoneCallback->Call(status, userdata);
+}
+
+class WireQueueTests : public WireTest {
+  protected:
+    void SetUp() override {
+        WireTest::SetUp();
+        mockQueueWorkDoneCallback = std::make_unique<MockQueueWorkDoneCallback>();
     }
 
-    class WireQueueTests : public WireTest {
-      protected:
-        void SetUp() override {
-            WireTest::SetUp();
-            mockQueueWorkDoneCallback = std::make_unique<MockQueueWorkDoneCallback>();
-        }
-
-        void TearDown() override {
-            WireTest::TearDown();
-            mockQueueWorkDoneCallback = nullptr;
-        }
-
-        void FlushServer() {
-            WireTest::FlushServer();
-            Mock::VerifyAndClearExpectations(&mockQueueWorkDoneCallback);
-        }
-    };
-
-    // Test that a successful OnSubmittedWorkDone call is forwarded to the client.
-    TEST_F(WireQueueTests, OnSubmittedWorkDoneSuccess) {
-        wgpuQueueOnSubmittedWorkDone(queue, 0u, ToMockQueueWorkDone, this);
-        EXPECT_CALL(api, OnQueueOnSubmittedWorkDone(apiQueue, 0u, _, _))
-            .WillOnce(InvokeWithoutArgs([&]() {
-                api.CallQueueOnSubmittedWorkDoneCallback(apiQueue, WGPUQueueWorkDoneStatus_Success);
-            }));
-        FlushClient();
-
-        EXPECT_CALL(*mockQueueWorkDoneCallback, Call(WGPUQueueWorkDoneStatus_Success, this))
-            .Times(1);
-        FlushServer();
+    void TearDown() override {
+        WireTest::TearDown();
+        mockQueueWorkDoneCallback = nullptr;
     }
 
-    // Test that an error OnSubmittedWorkDone call is forwarded as an error to the client.
-    TEST_F(WireQueueTests, OnSubmittedWorkDoneError) {
-        wgpuQueueOnSubmittedWorkDone(queue, 0u, ToMockQueueWorkDone, this);
-        EXPECT_CALL(api, OnQueueOnSubmittedWorkDone(apiQueue, 0u, _, _))
-            .WillOnce(InvokeWithoutArgs([&]() {
-                api.CallQueueOnSubmittedWorkDoneCallback(apiQueue, WGPUQueueWorkDoneStatus_Error);
-            }));
-        FlushClient();
-
-        EXPECT_CALL(*mockQueueWorkDoneCallback, Call(WGPUQueueWorkDoneStatus_Error, this)).Times(1);
-        FlushServer();
+    void FlushServer() {
+        WireTest::FlushServer();
+        Mock::VerifyAndClearExpectations(&mockQueueWorkDoneCallback);
     }
+};
 
-    // Test registering an OnSubmittedWorkDone then disconnecting the wire calls the callback with
-    // device loss
-    TEST_F(WireQueueTests, OnSubmittedWorkDoneBeforeDisconnect) {
-        wgpuQueueOnSubmittedWorkDone(queue, 0u, ToMockQueueWorkDone, this);
-        EXPECT_CALL(api, OnQueueOnSubmittedWorkDone(apiQueue, 0u, _, _))
-            .WillOnce(InvokeWithoutArgs([&]() {
-                api.CallQueueOnSubmittedWorkDoneCallback(apiQueue, WGPUQueueWorkDoneStatus_Error);
-            }));
-        FlushClient();
+// Test that a successful OnSubmittedWorkDone call is forwarded to the client.
+TEST_F(WireQueueTests, OnSubmittedWorkDoneSuccess) {
+    wgpuQueueOnSubmittedWorkDone(queue, 0u, ToMockQueueWorkDone, this);
+    EXPECT_CALL(api, OnQueueOnSubmittedWorkDone(apiQueue, 0u, _, _))
+        .WillOnce(InvokeWithoutArgs([&]() {
+            api.CallQueueOnSubmittedWorkDoneCallback(apiQueue, WGPUQueueWorkDoneStatus_Success);
+        }));
+    FlushClient();
 
-        EXPECT_CALL(*mockQueueWorkDoneCallback, Call(WGPUQueueWorkDoneStatus_DeviceLost, this))
-            .Times(1);
-        GetWireClient()->Disconnect();
+    EXPECT_CALL(*mockQueueWorkDoneCallback, Call(WGPUQueueWorkDoneStatus_Success, this)).Times(1);
+    FlushServer();
+}
+
+// Test that an error OnSubmittedWorkDone call is forwarded as an error to the client.
+TEST_F(WireQueueTests, OnSubmittedWorkDoneError) {
+    wgpuQueueOnSubmittedWorkDone(queue, 0u, ToMockQueueWorkDone, this);
+    EXPECT_CALL(api, OnQueueOnSubmittedWorkDone(apiQueue, 0u, _, _))
+        .WillOnce(InvokeWithoutArgs([&]() {
+            api.CallQueueOnSubmittedWorkDoneCallback(apiQueue, WGPUQueueWorkDoneStatus_Error);
+        }));
+    FlushClient();
+
+    EXPECT_CALL(*mockQueueWorkDoneCallback, Call(WGPUQueueWorkDoneStatus_Error, this)).Times(1);
+    FlushServer();
+}
+
+// Test registering an OnSubmittedWorkDone then disconnecting the wire calls the callback with
+// device loss
+TEST_F(WireQueueTests, OnSubmittedWorkDoneBeforeDisconnect) {
+    wgpuQueueOnSubmittedWorkDone(queue, 0u, ToMockQueueWorkDone, this);
+    EXPECT_CALL(api, OnQueueOnSubmittedWorkDone(apiQueue, 0u, _, _))
+        .WillOnce(InvokeWithoutArgs([&]() {
+            api.CallQueueOnSubmittedWorkDoneCallback(apiQueue, WGPUQueueWorkDoneStatus_Error);
+        }));
+    FlushClient();
+
+    EXPECT_CALL(*mockQueueWorkDoneCallback, Call(WGPUQueueWorkDoneStatus_DeviceLost, this))
+        .Times(1);
+    GetWireClient()->Disconnect();
+}
+
+// Test registering an OnSubmittedWorkDone after disconnecting the wire calls the callback with
+// device loss
+TEST_F(WireQueueTests, OnSubmittedWorkDoneAfterDisconnect) {
+    GetWireClient()->Disconnect();
+
+    EXPECT_CALL(*mockQueueWorkDoneCallback, Call(WGPUQueueWorkDoneStatus_DeviceLost, this))
+        .Times(1);
+    wgpuQueueOnSubmittedWorkDone(queue, 0u, ToMockQueueWorkDone, this);
+}
+
+// Hack to pass in test context into user callback
+struct TestData {
+    WireQueueTests* pTest;
+    WGPUQueue* pTestQueue;
+    size_t numRequests;
+};
+
+static void ToMockQueueWorkDoneWithNewRequests(WGPUQueueWorkDoneStatus status, void* userdata) {
+    TestData* testData = reinterpret_cast<TestData*>(userdata);
+    // Mimic the user callback sending new requests
+    ASSERT_NE(testData, nullptr);
+    ASSERT_NE(testData->pTest, nullptr);
+    ASSERT_NE(testData->pTestQueue, nullptr);
+    mockQueueWorkDoneCallback->Call(status, testData->pTest);
+
+    // Send the requests a number of times
+    for (size_t i = 0; i < testData->numRequests; i++) {
+        wgpuQueueOnSubmittedWorkDone(*(testData->pTestQueue), 0u, ToMockQueueWorkDone,
+                                     testData->pTest);
     }
+}
 
-    // Test registering an OnSubmittedWorkDone after disconnecting the wire calls the callback with
-    // device loss
-    TEST_F(WireQueueTests, OnSubmittedWorkDoneAfterDisconnect) {
-        GetWireClient()->Disconnect();
+// Test that requests inside user callbacks before disconnect are called
+TEST_F(WireQueueTests, OnSubmittedWorkDoneInsideCallbackBeforeDisconnect) {
+    TestData testData = {this, &queue, 10};
+    wgpuQueueOnSubmittedWorkDone(queue, 0u, ToMockQueueWorkDoneWithNewRequests, &testData);
+    EXPECT_CALL(api, OnQueueOnSubmittedWorkDone(apiQueue, 0u, _, _))
+        .WillOnce(InvokeWithoutArgs([&]() {
+            api.CallQueueOnSubmittedWorkDoneCallback(apiQueue, WGPUQueueWorkDoneStatus_Error);
+        }));
+    FlushClient();
 
-        EXPECT_CALL(*mockQueueWorkDoneCallback, Call(WGPUQueueWorkDoneStatus_DeviceLost, this))
-            .Times(1);
-        wgpuQueueOnSubmittedWorkDone(queue, 0u, ToMockQueueWorkDone, this);
-    }
+    EXPECT_CALL(*mockQueueWorkDoneCallback, Call(WGPUQueueWorkDoneStatus_DeviceLost, this))
+        .Times(1 + testData.numRequests);
+    GetWireClient()->Disconnect();
+}
 
-    // Hack to pass in test context into user callback
-    struct TestData {
-        WireQueueTests* pTest;
-        WGPUQueue* pTestQueue;
-        size_t numRequests;
-    };
-
-    static void ToMockQueueWorkDoneWithNewRequests(WGPUQueueWorkDoneStatus status, void* userdata) {
-        TestData* testData = reinterpret_cast<TestData*>(userdata);
-        // Mimic the user callback is sending new requests
-        ASSERT_NE(testData, nullptr);
-        ASSERT_NE(testData->pTest, nullptr);
-        ASSERT_NE(testData->pTestQueue, nullptr);
-        mockQueueWorkDoneCallback->Call(status, testData->pTest);
-
-        // Send the requests a number of times
-        for (size_t i = 0; i < testData->numRequests; i++) {
-            wgpuQueueOnSubmittedWorkDone(*(testData->pTestQueue), 0u, ToMockQueueWorkDone,
-                                         testData->pTest);
-        }
-    }
-
-    // Test that requests inside user callbacks before disconnect are called
-    TEST_F(WireQueueTests, OnSubmittedWorkDoneInsideCallbackBeforeDisconnect) {
-        TestData testData = {this, &queue, 10};
-        wgpuQueueOnSubmittedWorkDone(queue, 0u, ToMockQueueWorkDoneWithNewRequests, &testData);
-        EXPECT_CALL(api, OnQueueOnSubmittedWorkDone(apiQueue, 0u, _, _))
-            .WillOnce(InvokeWithoutArgs([&]() {
-                api.CallQueueOnSubmittedWorkDoneCallback(apiQueue, WGPUQueueWorkDoneStatus_Error);
-            }));
-        FlushClient();
-
-        EXPECT_CALL(*mockQueueWorkDoneCallback, Call(WGPUQueueWorkDoneStatus_DeviceLost, this))
-            .Times(1 + testData.numRequests);
-        GetWireClient()->Disconnect();
-    }
-
-    // Only one default queue is supported now so we cannot test ~Queue triggering ClearAllCallbacks
-    // since it is always destructed after the test TearDown, and we cannot create a new queue obj
-    // with wgpuDeviceGetQueue
+// Only one default queue is supported now so we cannot test ~Queue triggering ClearAllCallbacks
+// since it is always destructed after the test TearDown, and we cannot create a new queue obj
+// with wgpuDeviceGetQueue
 
 }  // namespace dawn::wire
diff --git a/src/dawn/tests/unittests/wire/WireShaderModuleTests.cpp b/src/dawn/tests/unittests/wire/WireShaderModuleTests.cpp
index 48c93d2..67d258b 100644
--- a/src/dawn/tests/unittests/wire/WireShaderModuleTests.cpp
+++ b/src/dawn/tests/unittests/wire/WireShaderModuleTests.cpp
@@ -19,226 +19,224 @@
 
 namespace dawn::wire {
 
-    using testing::_;
-    using testing::InvokeWithoutArgs;
-    using testing::Mock;
-    using testing::Return;
-    using testing::StrictMock;
+using testing::_;
+using testing::InvokeWithoutArgs;
+using testing::Mock;
+using testing::Return;
+using testing::StrictMock;
 
-    namespace {
+namespace {
 
-        // Mock class to add expectations on the wire calling callbacks
-        class MockCompilationInfoCallback {
-          public:
-            MOCK_METHOD(void,
-                        Call,
-                        (WGPUCompilationInfoRequestStatus status,
-                         const WGPUCompilationInfo* info,
-                         void* userdata));
-        };
+// Mock class to add expectations on the wire calling callbacks
+class MockCompilationInfoCallback {
+  public:
+    MOCK_METHOD(void,
+                Call,
+                (WGPUCompilationInfoRequestStatus status,
+                 const WGPUCompilationInfo* info,
+                 void* userdata));
+};
 
-        std::unique_ptr<StrictMock<MockCompilationInfoCallback>> mockCompilationInfoCallback;
-        void ToMockGetCompilationInfoCallback(WGPUCompilationInfoRequestStatus status,
-                                              const WGPUCompilationInfo* info,
-                                              void* userdata) {
-            mockCompilationInfoCallback->Call(status, info, userdata);
-        }
+std::unique_ptr<StrictMock<MockCompilationInfoCallback>> mockCompilationInfoCallback;
+void ToMockGetCompilationInfoCallback(WGPUCompilationInfoRequestStatus status,
+                                      const WGPUCompilationInfo* info,
+                                      void* userdata) {
+    mockCompilationInfoCallback->Call(status, info, userdata);
+}
 
-    }  // anonymous namespace
+}  // anonymous namespace
 
-    class WireShaderModuleTests : public WireTest {
-      public:
-        WireShaderModuleTests() {
-        }
-        ~WireShaderModuleTests() override = default;
+class WireShaderModuleTests : public WireTest {
+  public:
+    WireShaderModuleTests() {}
+    ~WireShaderModuleTests() override = default;
 
-        void SetUp() override {
-            WireTest::SetUp();
+    void SetUp() override {
+        WireTest::SetUp();
 
-            mockCompilationInfoCallback =
-                std::make_unique<StrictMock<MockCompilationInfoCallback>>();
-            apiShaderModule = api.GetNewShaderModule();
+        mockCompilationInfoCallback = std::make_unique<StrictMock<MockCompilationInfoCallback>>();
+        apiShaderModule = api.GetNewShaderModule();
 
-            WGPUShaderModuleDescriptor descriptor = {};
-            shaderModule = wgpuDeviceCreateShaderModule(device, &descriptor);
+        WGPUShaderModuleDescriptor descriptor = {};
+        shaderModule = wgpuDeviceCreateShaderModule(device, &descriptor);
 
-            EXPECT_CALL(api, DeviceCreateShaderModule(apiDevice, _))
-                .WillOnce(Return(apiShaderModule))
-                .RetiresOnSaturation();
-            FlushClient();
-        }
-
-        void TearDown() override {
-            WireTest::TearDown();
-
-            // Delete mock so that expectations are checked
-            mockCompilationInfoCallback = nullptr;
-        }
-
-        void FlushClient() {
-            WireTest::FlushClient();
-            Mock::VerifyAndClearExpectations(&mockCompilationInfoCallback);
-        }
-
-        void FlushServer() {
-            WireTest::FlushServer();
-            Mock::VerifyAndClearExpectations(&mockCompilationInfoCallback);
-        }
-
-      protected:
-        WGPUShaderModule shaderModule;
-        WGPUShaderModule apiShaderModule;
-    };
-
-    // Check getting CompilationInfo for a successfully created shader module
-    TEST_F(WireShaderModuleTests, GetCompilationInfo) {
-        wgpuShaderModuleGetCompilationInfo(shaderModule, ToMockGetCompilationInfoCallback, nullptr);
-
-        WGPUCompilationMessage message = {
-            nullptr, "Test Message", WGPUCompilationMessageType_Info, 2, 4, 6, 8};
-        WGPUCompilationInfo compilationInfo;
-        compilationInfo.nextInChain = nullptr;
-        compilationInfo.messageCount = 1;
-        compilationInfo.messages = &message;
-
-        EXPECT_CALL(api, OnShaderModuleGetCompilationInfo(apiShaderModule, _, _))
-            .WillOnce(InvokeWithoutArgs([&]() {
-                api.CallShaderModuleGetCompilationInfoCallback(
-                    apiShaderModule, WGPUCompilationInfoRequestStatus_Success, &compilationInfo);
-            }));
-
+        EXPECT_CALL(api, DeviceCreateShaderModule(apiDevice, _))
+            .WillOnce(Return(apiShaderModule))
+            .RetiresOnSaturation();
         FlushClient();
-
-        EXPECT_CALL(*mockCompilationInfoCallback,
-                    Call(WGPUCompilationInfoRequestStatus_Success,
-                         MatchesLambda([&](const WGPUCompilationInfo* info) -> bool {
-                             if (info->messageCount != compilationInfo.messageCount) {
-                                 return false;
-                             }
-                             const WGPUCompilationMessage* infoMessage = &info->messages[0];
-                             return strcmp(infoMessage->message, message.message) == 0 &&
-                                    infoMessage->nextInChain == message.nextInChain &&
-                                    infoMessage->type == message.type &&
-                                    infoMessage->lineNum == message.lineNum &&
-                                    infoMessage->linePos == message.linePos &&
-                                    infoMessage->offset == message.offset &&
-                                    infoMessage->length == message.length;
-                         }),
-                         _))
-            .Times(1);
-        FlushServer();
     }
 
-    // Test that calling GetCompilationInfo then disconnecting the wire calls the callback with a
-    // device loss.
-    TEST_F(WireShaderModuleTests, GetCompilationInfoBeforeDisconnect) {
-        wgpuShaderModuleGetCompilationInfo(shaderModule, ToMockGetCompilationInfoCallback, nullptr);
+    void TearDown() override {
+        WireTest::TearDown();
 
-        WGPUCompilationMessage message = {
-            nullptr, "Test Message", WGPUCompilationMessageType_Info, 2, 4, 6, 8};
-        WGPUCompilationInfo compilationInfo;
-        compilationInfo.nextInChain = nullptr;
-        compilationInfo.messageCount = 1;
-        compilationInfo.messages = &message;
-
-        EXPECT_CALL(api, OnShaderModuleGetCompilationInfo(apiShaderModule, _, _))
-            .WillOnce(InvokeWithoutArgs([&]() {
-                api.CallShaderModuleGetCompilationInfoCallback(
-                    apiShaderModule, WGPUCompilationInfoRequestStatus_Success, &compilationInfo);
-            }));
-        FlushClient();
-
-        EXPECT_CALL(*mockCompilationInfoCallback,
-                    Call(WGPUCompilationInfoRequestStatus_DeviceLost, nullptr, _));
-        GetWireClient()->Disconnect();
+        // Delete mock so that expectations are checked
+        mockCompilationInfoCallback = nullptr;
     }
 
-    // Test that calling GetCompilationInfo after disconnecting the wire calls the callback with a
-    // device loss.
-    TEST_F(WireShaderModuleTests, GetCompilationInfoAfterDisconnect) {
-        GetWireClient()->Disconnect();
-        EXPECT_CALL(*mockCompilationInfoCallback,
-                    Call(WGPUCompilationInfoRequestStatus_DeviceLost, nullptr, _));
-        wgpuShaderModuleGetCompilationInfo(shaderModule, ToMockGetCompilationInfoCallback, nullptr);
+    void FlushClient() {
+        WireTest::FlushClient();
+        Mock::VerifyAndClearExpectations(&mockCompilationInfoCallback);
     }
 
-    // Hack to pass in test context into user callback
-    struct TestData {
-        WireShaderModuleTests* pTest;
-        WGPUShaderModule* pTestShaderModule;
-        size_t numRequests;
-    };
-
-    static void ToMockBufferMapCallbackWithNewRequests(WGPUCompilationInfoRequestStatus status,
-                                                       const WGPUCompilationInfo* info,
-                                                       void* userdata) {
-        TestData* testData = reinterpret_cast<TestData*>(userdata);
-        // Mimic the user callback is sending new requests
-        ASSERT_NE(testData, nullptr);
-        ASSERT_NE(testData->pTest, nullptr);
-        ASSERT_NE(testData->pTestShaderModule, nullptr);
-
-        mockCompilationInfoCallback->Call(status, info, testData->pTest);
-
-        // Send the requests a number of times
-        for (size_t i = 0; i < testData->numRequests; i++) {
-            wgpuShaderModuleGetCompilationInfo(*(testData->pTestShaderModule),
-                                               ToMockGetCompilationInfoCallback, nullptr);
-        }
+    void FlushServer() {
+        WireTest::FlushServer();
+        Mock::VerifyAndClearExpectations(&mockCompilationInfoCallback);
     }
 
-    // Test that requests inside user callbacks before disconnect are called
-    TEST_F(WireShaderModuleTests, GetCompilationInfoInsideCallbackBeforeDisconnect) {
-        TestData testData = {this, &shaderModule, 10};
+  protected:
+    WGPUShaderModule shaderModule;
+    WGPUShaderModule apiShaderModule;
+};
 
-        wgpuShaderModuleGetCompilationInfo(shaderModule, ToMockBufferMapCallbackWithNewRequests,
-                                           &testData);
+// Check getting CompilationInfo for a successfully created shader module
+TEST_F(WireShaderModuleTests, GetCompilationInfo) {
+    wgpuShaderModuleGetCompilationInfo(shaderModule, ToMockGetCompilationInfoCallback, nullptr);
 
-        WGPUCompilationMessage message = {
-            nullptr, "Test Message", WGPUCompilationMessageType_Info, 2, 4, 6, 8};
-        WGPUCompilationInfo compilationInfo;
-        compilationInfo.nextInChain = nullptr;
-        compilationInfo.messageCount = 1;
-        compilationInfo.messages = &message;
+    WGPUCompilationMessage message = {
+        nullptr, "Test Message", WGPUCompilationMessageType_Info, 2, 4, 6, 8};
+    WGPUCompilationInfo compilationInfo;
+    compilationInfo.nextInChain = nullptr;
+    compilationInfo.messageCount = 1;
+    compilationInfo.messages = &message;
 
-        EXPECT_CALL(api, OnShaderModuleGetCompilationInfo(apiShaderModule, _, _))
-            .WillOnce(InvokeWithoutArgs([&]() {
-                api.CallShaderModuleGetCompilationInfoCallback(
-                    apiShaderModule, WGPUCompilationInfoRequestStatus_Success, &compilationInfo);
-            }));
-        FlushClient();
+    EXPECT_CALL(api, OnShaderModuleGetCompilationInfo(apiShaderModule, _, _))
+        .WillOnce(InvokeWithoutArgs([&]() {
+            api.CallShaderModuleGetCompilationInfoCallback(
+                apiShaderModule, WGPUCompilationInfoRequestStatus_Success, &compilationInfo);
+        }));
 
-        EXPECT_CALL(*mockCompilationInfoCallback,
-                    Call(WGPUCompilationInfoRequestStatus_DeviceLost, nullptr, _))
-            .Times(1 + testData.numRequests);
-        GetWireClient()->Disconnect();
+    FlushClient();
+
+    EXPECT_CALL(*mockCompilationInfoCallback,
+                Call(WGPUCompilationInfoRequestStatus_Success,
+                     MatchesLambda([&](const WGPUCompilationInfo* info) -> bool {
+                         if (info->messageCount != compilationInfo.messageCount) {
+                             return false;
+                         }
+                         const WGPUCompilationMessage* infoMessage = &info->messages[0];
+                         return strcmp(infoMessage->message, message.message) == 0 &&
+                                infoMessage->nextInChain == message.nextInChain &&
+                                infoMessage->type == message.type &&
+                                infoMessage->lineNum == message.lineNum &&
+                                infoMessage->linePos == message.linePos &&
+                                infoMessage->offset == message.offset &&
+                                infoMessage->length == message.length;
+                     }),
+                     _))
+        .Times(1);
+    FlushServer();
+}
+
+// Test that calling GetCompilationInfo then disconnecting the wire calls the callback with a
+// device loss.
+TEST_F(WireShaderModuleTests, GetCompilationInfoBeforeDisconnect) {
+    wgpuShaderModuleGetCompilationInfo(shaderModule, ToMockGetCompilationInfoCallback, nullptr);
+
+    WGPUCompilationMessage message = {
+        nullptr, "Test Message", WGPUCompilationMessageType_Info, 2, 4, 6, 8};
+    WGPUCompilationInfo compilationInfo;
+    compilationInfo.nextInChain = nullptr;
+    compilationInfo.messageCount = 1;
+    compilationInfo.messages = &message;
+
+    EXPECT_CALL(api, OnShaderModuleGetCompilationInfo(apiShaderModule, _, _))
+        .WillOnce(InvokeWithoutArgs([&]() {
+            api.CallShaderModuleGetCompilationInfoCallback(
+                apiShaderModule, WGPUCompilationInfoRequestStatus_Success, &compilationInfo);
+        }));
+    FlushClient();
+
+    EXPECT_CALL(*mockCompilationInfoCallback,
+                Call(WGPUCompilationInfoRequestStatus_DeviceLost, nullptr, _));
+    GetWireClient()->Disconnect();
+}
+
+// Test that calling GetCompilationInfo after disconnecting the wire calls the callback with a
+// device loss.
+TEST_F(WireShaderModuleTests, GetCompilationInfoAfterDisconnect) {
+    GetWireClient()->Disconnect();
+    EXPECT_CALL(*mockCompilationInfoCallback,
+                Call(WGPUCompilationInfoRequestStatus_DeviceLost, nullptr, _));
+    wgpuShaderModuleGetCompilationInfo(shaderModule, ToMockGetCompilationInfoCallback, nullptr);
+}
+
+// Hack to pass in test context into user callback
+struct TestData {
+    WireShaderModuleTests* pTest;
+    WGPUShaderModule* pTestShaderModule;
+    size_t numRequests;
+};
+
+static void ToMockBufferMapCallbackWithNewRequests(WGPUCompilationInfoRequestStatus status,
+                                                   const WGPUCompilationInfo* info,
+                                                   void* userdata) {
+    TestData* testData = reinterpret_cast<TestData*>(userdata);
+    // Mimic the user callback is sending new requests
+    ASSERT_NE(testData, nullptr);
+    ASSERT_NE(testData->pTest, nullptr);
+    ASSERT_NE(testData->pTestShaderModule, nullptr);
+
+    mockCompilationInfoCallback->Call(status, info, testData->pTest);
+
+    // Send the requests a number of times
+    for (size_t i = 0; i < testData->numRequests; i++) {
+        wgpuShaderModuleGetCompilationInfo(*(testData->pTestShaderModule),
+                                           ToMockGetCompilationInfoCallback, nullptr);
     }
+}
 
-    // Test that requests inside user callbacks before object destruction are called
-    TEST_F(WireShaderModuleTests, GetCompilationInfoInsideCallbackBeforeDestruction) {
-        TestData testData = {this, &shaderModule, 10};
+// Test that requests inside user callbacks before disconnect are called
+TEST_F(WireShaderModuleTests, GetCompilationInfoInsideCallbackBeforeDisconnect) {
+    TestData testData = {this, &shaderModule, 10};
 
-        wgpuShaderModuleGetCompilationInfo(shaderModule, ToMockBufferMapCallbackWithNewRequests,
-                                           &testData);
+    wgpuShaderModuleGetCompilationInfo(shaderModule, ToMockBufferMapCallbackWithNewRequests,
+                                       &testData);
 
-        WGPUCompilationMessage message = {
-            nullptr, "Test Message", WGPUCompilationMessageType_Info, 2, 4, 6, 8};
-        WGPUCompilationInfo compilationInfo;
-        compilationInfo.nextInChain = nullptr;
-        compilationInfo.messageCount = 1;
-        compilationInfo.messages = &message;
+    WGPUCompilationMessage message = {
+        nullptr, "Test Message", WGPUCompilationMessageType_Info, 2, 4, 6, 8};
+    WGPUCompilationInfo compilationInfo;
+    compilationInfo.nextInChain = nullptr;
+    compilationInfo.messageCount = 1;
+    compilationInfo.messages = &message;
 
-        EXPECT_CALL(api, OnShaderModuleGetCompilationInfo(apiShaderModule, _, _))
-            .WillOnce(InvokeWithoutArgs([&]() {
-                api.CallShaderModuleGetCompilationInfoCallback(
-                    apiShaderModule, WGPUCompilationInfoRequestStatus_Success, &compilationInfo);
-            }));
-        FlushClient();
+    EXPECT_CALL(api, OnShaderModuleGetCompilationInfo(apiShaderModule, _, _))
+        .WillOnce(InvokeWithoutArgs([&]() {
+            api.CallShaderModuleGetCompilationInfoCallback(
+                apiShaderModule, WGPUCompilationInfoRequestStatus_Success, &compilationInfo);
+        }));
+    FlushClient();
 
-        EXPECT_CALL(*mockCompilationInfoCallback,
-                    Call(WGPUCompilationInfoRequestStatus_Unknown, nullptr, _))
-            .Times(1 + testData.numRequests);
-        wgpuShaderModuleRelease(shaderModule);
-    }
+    EXPECT_CALL(*mockCompilationInfoCallback,
+                Call(WGPUCompilationInfoRequestStatus_DeviceLost, nullptr, _))
+        .Times(1 + testData.numRequests);
+    GetWireClient()->Disconnect();
+}
+
+// Test that requests inside user callbacks before object destruction are called
+TEST_F(WireShaderModuleTests, GetCompilationInfoInsideCallbackBeforeDestruction) {
+    TestData testData = {this, &shaderModule, 10};
+
+    wgpuShaderModuleGetCompilationInfo(shaderModule, ToMockBufferMapCallbackWithNewRequests,
+                                       &testData);
+
+    WGPUCompilationMessage message = {
+        nullptr, "Test Message", WGPUCompilationMessageType_Info, 2, 4, 6, 8};
+    WGPUCompilationInfo compilationInfo;
+    compilationInfo.nextInChain = nullptr;
+    compilationInfo.messageCount = 1;
+    compilationInfo.messages = &message;
+
+    EXPECT_CALL(api, OnShaderModuleGetCompilationInfo(apiShaderModule, _, _))
+        .WillOnce(InvokeWithoutArgs([&]() {
+            api.CallShaderModuleGetCompilationInfoCallback(
+                apiShaderModule, WGPUCompilationInfoRequestStatus_Success, &compilationInfo);
+        }));
+    FlushClient();
+
+    EXPECT_CALL(*mockCompilationInfoCallback,
+                Call(WGPUCompilationInfoRequestStatus_Unknown, nullptr, _))
+        .Times(1 + testData.numRequests);
+    wgpuShaderModuleRelease(shaderModule);
+}
 
 }  // namespace dawn::wire
diff --git a/src/dawn/tests/unittests/wire/WireTest.cpp b/src/dawn/tests/unittests/wire/WireTest.cpp
index 9397139..4a9f0d6 100644
--- a/src/dawn/tests/unittests/wire/WireTest.cpp
+++ b/src/dawn/tests/unittests/wire/WireTest.cpp
@@ -25,11 +25,9 @@
 using testing::Mock;
 using testing::Return;
 
-WireTest::WireTest() {
-}
+WireTest::WireTest() {}
 
-WireTest::~WireTest() {
-}
+WireTest::~WireTest() {}
 
 dawn::wire::client::MemoryTransferService* WireTest::GetClientMemoryTransferService() {
     return nullptr;
diff --git a/src/dawn/tests/unittests/wire/WireTest.h b/src/dawn/tests/unittests/wire/WireTest.h
index cd91c3e..0cbd156 100644
--- a/src/dawn/tests/unittests/wire/WireTest.h
+++ b/src/dawn/tests/unittests/wire/WireTest.h
@@ -40,12 +40,9 @@
 template <typename Lambda, typename Arg>
 class LambdaMatcherImpl : public testing::MatcherInterface<Arg> {
   public:
-    explicit LambdaMatcherImpl(Lambda lambda) : mLambda(lambda) {
-    }
+    explicit LambdaMatcherImpl(Lambda lambda) : mLambda(lambda) {}
 
-    void DescribeTo(std::ostream* os) const override {
-        *os << "with a custom matcher";
-    }
+    void DescribeTo(std::ostream* os) const override { *os << "with a custom matcher"; }
 
     bool MatchAndExplain(Arg value, testing::MatchResultListener* listener) const override {
         if (!mLambda(value)) {
@@ -71,8 +68,7 @@
 
 class StringMessageMatcher : public testing::MatcherInterface<const char*> {
   public:
-    StringMessageMatcher() {
-    }
+    StringMessageMatcher() {}
 
     bool MatchAndExplain(const char* message,
                          testing::MatchResultListener* listener) const override {
@@ -87,13 +83,9 @@
         return true;
     }
 
-    void DescribeTo(std::ostream* os) const override {
-        *os << "valid error message";
-    }
+    void DescribeTo(std::ostream* os) const override { *os << "valid error message"; }
 
-    void DescribeNegationTo(std::ostream* os) const override {
-        *os << "invalid error message";
-    }
+    void DescribeNegationTo(std::ostream* os) const override { *os << "invalid error message"; }
 };
 
 inline testing::Matcher<const char*> ValidStringMessage() {
@@ -101,18 +93,18 @@
 }
 
 namespace dawn::wire {
-    class WireClient;
-    class WireServer;
-    namespace client {
-        class MemoryTransferService;
-    }  // namespace client
-    namespace server {
-        class MemoryTransferService;
-    }  // namespace server
+class WireClient;
+class WireServer;
+namespace client {
+class MemoryTransferService;
+}  // namespace client
+namespace server {
+class MemoryTransferService;
+}  // namespace server
 }  // namespace dawn::wire
 
 namespace utils {
-    class TerribleCommandBuffer;
+class TerribleCommandBuffer;
 }
 
 class WireTest : public testing::Test {
diff --git a/src/dawn/tests/white_box/D3D12DescriptorHeapTests.cpp b/src/dawn/tests/white_box/D3D12DescriptorHeapTests.cpp
index cc020bc..16f8c54 100644
--- a/src/dawn/tests/white_box/D3D12DescriptorHeapTests.cpp
+++ b/src/dawn/tests/white_box/D3D12DescriptorHeapTests.cpp
@@ -28,21 +28,21 @@
 
 namespace dawn::native::d3d12 {
 
-    constexpr uint32_t kRTSize = 4;
+constexpr uint32_t kRTSize = 4;
 
-    // Pooling tests are required to advance the GPU completed serial to reuse heaps.
-    // This requires Tick() to be called at-least |kFrameDepth| times. This constant
-    // should be updated if the internals of Tick() change.
-    constexpr uint32_t kFrameDepth = 2;
+// Pooling tests are required to advance the GPU completed serial to reuse heaps.
+// This requires Tick() to be called at-least |kFrameDepth| times. This constant
+// should be updated if the internals of Tick() change.
+constexpr uint32_t kFrameDepth = 2;
 
-    class D3D12DescriptorHeapTests : public DawnTest {
-      protected:
-        void SetUp() override {
-            DawnTest::SetUp();
-            DAWN_TEST_UNSUPPORTED_IF(UsesWire());
-            mD3DDevice = reinterpret_cast<Device*>(device.Get());
+class D3D12DescriptorHeapTests : public DawnTest {
+  protected:
+    void SetUp() override {
+        DawnTest::SetUp();
+        DAWN_TEST_UNSUPPORTED_IF(UsesWire());
+        mD3DDevice = reinterpret_cast<Device*>(device.Get());
 
-            mSimpleVSModule = utils::CreateShaderModule(device, R"(
+        mSimpleVSModule = utils::CreateShaderModule(device, R"(
 
             @stage(vertex) fn main(
                 @builtin(vertex_index) VertexIndex : u32
@@ -55,7 +55,7 @@
                 return vec4<f32>(pos[VertexIndex], 0.0, 1.0);
             })");
 
-            mSimpleFSModule = utils::CreateShaderModule(device, R"(
+        mSimpleFSModule = utils::CreateShaderModule(device, R"(
             struct U {
                 color : vec4<f32>
             }
@@ -64,397 +64,390 @@
             @stage(fragment) fn main() -> @location(0) vec4<f32> {
                 return colorBuffer.color;
             })");
-        }
-
-        utils::BasicRenderPass MakeRenderPass(uint32_t width,
-                                              uint32_t height,
-                                              wgpu::TextureFormat format) {
-            DAWN_ASSERT(width > 0 && height > 0);
-
-            wgpu::TextureDescriptor descriptor;
-            descriptor.dimension = wgpu::TextureDimension::e2D;
-            descriptor.size.width = width;
-            descriptor.size.height = height;
-            descriptor.size.depthOrArrayLayers = 1;
-            descriptor.sampleCount = 1;
-            descriptor.format = format;
-            descriptor.mipLevelCount = 1;
-            descriptor.usage = wgpu::TextureUsage::RenderAttachment | wgpu::TextureUsage::CopySrc;
-            wgpu::Texture color = device.CreateTexture(&descriptor);
-
-            return utils::BasicRenderPass(width, height, color);
-        }
-
-        std::array<float, 4> GetSolidColor(uint32_t n) const {
-            ASSERT(n >> 24 == 0);
-            float b = (n & 0xFF) / 255.0f;
-            float g = ((n >> 8) & 0xFF) / 255.0f;
-            float r = ((n >> 16) & 0xFF) / 255.0f;
-            return {r, g, b, 1};
-        }
-
-        Device* mD3DDevice = nullptr;
-
-        wgpu::ShaderModule mSimpleVSModule;
-        wgpu::ShaderModule mSimpleFSModule;
-    };
-
-    class PlaceholderStagingDescriptorAllocator {
-      public:
-        PlaceholderStagingDescriptorAllocator(Device* device,
-                                              uint32_t descriptorCount,
-                                              uint32_t allocationsPerHeap)
-            : mAllocator(device,
-                         descriptorCount,
-                         allocationsPerHeap * descriptorCount,
-                         D3D12_DESCRIPTOR_HEAP_TYPE_SAMPLER) {
-        }
-
-        CPUDescriptorHeapAllocation AllocateCPUDescriptors() {
-            dawn::native::ResultOrError<CPUDescriptorHeapAllocation> result =
-                mAllocator.AllocateCPUDescriptors();
-            return (result.IsSuccess()) ? result.AcquireSuccess() : CPUDescriptorHeapAllocation{};
-        }
-
-        void Deallocate(CPUDescriptorHeapAllocation& allocation) {
-            mAllocator.Deallocate(&allocation);
-        }
-
-      private:
-        StagingDescriptorAllocator mAllocator;
-    };
-
-    // Verify the shader visible view heaps switch over within a single submit.
-    TEST_P(D3D12DescriptorHeapTests, SwitchOverViewHeap) {
-        DAWN_TEST_UNSUPPORTED_IF(!mD3DDevice->IsToggleEnabled(
-            dawn::native::Toggle::UseD3D12SmallShaderVisibleHeapForTesting));
-
-        utils::ComboRenderPipelineDescriptor renderPipelineDescriptor;
-
-        // Fill in a view heap with "view only" bindgroups (1x view per group) by creating a
-        // view bindgroup each draw. After HEAP_SIZE + 1 draws, the heaps must switch over.
-        renderPipelineDescriptor.vertex.module = mSimpleVSModule;
-        renderPipelineDescriptor.cFragment.module = mSimpleFSModule;
-
-        wgpu::RenderPipeline renderPipeline =
-            device.CreateRenderPipeline(&renderPipelineDescriptor);
-        utils::BasicRenderPass renderPass = utils::CreateBasicRenderPass(device, kRTSize, kRTSize);
-
-        Device* d3dDevice = reinterpret_cast<Device*>(device.Get());
-        ShaderVisibleDescriptorAllocator* allocator =
-            d3dDevice->GetViewShaderVisibleDescriptorAllocator();
-        const uint64_t heapSize = allocator->GetShaderVisibleHeapSizeForTesting();
-
-        const HeapVersionID heapSerial = allocator->GetShaderVisibleHeapSerialForTesting();
-
-        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-        {
-            wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass.renderPassInfo);
-
-            pass.SetPipeline(renderPipeline);
-
-            std::array<float, 4> redColor = {1, 0, 0, 1};
-            wgpu::Buffer uniformBuffer = utils::CreateBufferFromData(
-                device, &redColor, sizeof(redColor), wgpu::BufferUsage::Uniform);
-
-            for (uint32_t i = 0; i < heapSize + 1; ++i) {
-                pass.SetBindGroup(0,
-                                  utils::MakeBindGroup(device, renderPipeline.GetBindGroupLayout(0),
-                                                       {{0, uniformBuffer, 0, sizeof(redColor)}}));
-                pass.Draw(3);
-            }
-
-            pass.End();
-        }
-
-        wgpu::CommandBuffer commands = encoder.Finish();
-        queue.Submit(1, &commands);
-
-        EXPECT_EQ(allocator->GetShaderVisibleHeapSerialForTesting(), heapSerial + HeapVersionID(1));
     }
 
-    // Verify the shader visible sampler heaps does not switch over within a single submit.
-    TEST_P(D3D12DescriptorHeapTests, NoSwitchOverSamplerHeap) {
-        utils::ComboRenderPipelineDescriptor renderPipelineDescriptor;
+    utils::BasicRenderPass MakeRenderPass(uint32_t width,
+                                          uint32_t height,
+                                          wgpu::TextureFormat format) {
+        DAWN_ASSERT(width > 0 && height > 0);
 
-        // Fill in a sampler heap with "sampler only" bindgroups (1x sampler per group) by creating
-        // a sampler bindgroup each draw. After HEAP_SIZE + 1 draws, the heaps WILL NOT switch over
-        // because the sampler heap allocations are de-duplicated.
-        renderPipelineDescriptor.vertex.module = utils::CreateShaderModule(device, R"(
+        wgpu::TextureDescriptor descriptor;
+        descriptor.dimension = wgpu::TextureDimension::e2D;
+        descriptor.size.width = width;
+        descriptor.size.height = height;
+        descriptor.size.depthOrArrayLayers = 1;
+        descriptor.sampleCount = 1;
+        descriptor.format = format;
+        descriptor.mipLevelCount = 1;
+        descriptor.usage = wgpu::TextureUsage::RenderAttachment | wgpu::TextureUsage::CopySrc;
+        wgpu::Texture color = device.CreateTexture(&descriptor);
+
+        return utils::BasicRenderPass(width, height, color);
+    }
+
+    std::array<float, 4> GetSolidColor(uint32_t n) const {
+        ASSERT(n >> 24 == 0);
+        float b = (n & 0xFF) / 255.0f;
+        float g = ((n >> 8) & 0xFF) / 255.0f;
+        float r = ((n >> 16) & 0xFF) / 255.0f;
+        return {r, g, b, 1};
+    }
+
+    Device* mD3DDevice = nullptr;
+
+    wgpu::ShaderModule mSimpleVSModule;
+    wgpu::ShaderModule mSimpleFSModule;
+};
+
+class PlaceholderStagingDescriptorAllocator {
+  public:
+    PlaceholderStagingDescriptorAllocator(Device* device,
+                                          uint32_t descriptorCount,
+                                          uint32_t allocationsPerHeap)
+        : mAllocator(device,
+                     descriptorCount,
+                     allocationsPerHeap * descriptorCount,
+                     D3D12_DESCRIPTOR_HEAP_TYPE_SAMPLER) {}
+
+    CPUDescriptorHeapAllocation AllocateCPUDescriptors() {
+        dawn::native::ResultOrError<CPUDescriptorHeapAllocation> result =
+            mAllocator.AllocateCPUDescriptors();
+        return (result.IsSuccess()) ? result.AcquireSuccess() : CPUDescriptorHeapAllocation{};
+    }
+
+    void Deallocate(CPUDescriptorHeapAllocation& allocation) { mAllocator.Deallocate(&allocation); }
+
+  private:
+    StagingDescriptorAllocator mAllocator;
+};
+
+// Verify the shader visible view heaps switch over within a single submit.
+TEST_P(D3D12DescriptorHeapTests, SwitchOverViewHeap) {
+    DAWN_TEST_UNSUPPORTED_IF(!mD3DDevice->IsToggleEnabled(
+        dawn::native::Toggle::UseD3D12SmallShaderVisibleHeapForTesting));
+
+    utils::ComboRenderPipelineDescriptor renderPipelineDescriptor;
+
+    // Fill in a view heap with "view only" bindgroups (1x view per group) by creating a
+    // view bindgroup each draw. After HEAP_SIZE + 1 draws, the heaps must switch over.
+    renderPipelineDescriptor.vertex.module = mSimpleVSModule;
+    renderPipelineDescriptor.cFragment.module = mSimpleFSModule;
+
+    wgpu::RenderPipeline renderPipeline = device.CreateRenderPipeline(&renderPipelineDescriptor);
+    utils::BasicRenderPass renderPass = utils::CreateBasicRenderPass(device, kRTSize, kRTSize);
+
+    Device* d3dDevice = reinterpret_cast<Device*>(device.Get());
+    ShaderVisibleDescriptorAllocator* allocator =
+        d3dDevice->GetViewShaderVisibleDescriptorAllocator();
+    const uint64_t heapSize = allocator->GetShaderVisibleHeapSizeForTesting();
+
+    const HeapVersionID heapSerial = allocator->GetShaderVisibleHeapSerialForTesting();
+
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+    {
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass.renderPassInfo);
+
+        pass.SetPipeline(renderPipeline);
+
+        std::array<float, 4> redColor = {1, 0, 0, 1};
+        wgpu::Buffer uniformBuffer = utils::CreateBufferFromData(
+            device, &redColor, sizeof(redColor), wgpu::BufferUsage::Uniform);
+
+        for (uint32_t i = 0; i < heapSize + 1; ++i) {
+            pass.SetBindGroup(0, utils::MakeBindGroup(device, renderPipeline.GetBindGroupLayout(0),
+                                                      {{0, uniformBuffer, 0, sizeof(redColor)}}));
+            pass.Draw(3);
+        }
+
+        pass.End();
+    }
+
+    wgpu::CommandBuffer commands = encoder.Finish();
+    queue.Submit(1, &commands);
+
+    EXPECT_EQ(allocator->GetShaderVisibleHeapSerialForTesting(), heapSerial + HeapVersionID(1));
+}
+
+// Verify the shader visible sampler heaps does not switch over within a single submit.
+TEST_P(D3D12DescriptorHeapTests, NoSwitchOverSamplerHeap) {
+    utils::ComboRenderPipelineDescriptor renderPipelineDescriptor;
+
+    // Fill in a sampler heap with "sampler only" bindgroups (1x sampler per group) by creating
+    // a sampler bindgroup each draw. After HEAP_SIZE + 1 draws, the heaps WILL NOT switch over
+    // because the sampler heap allocations are de-duplicated.
+    renderPipelineDescriptor.vertex.module = utils::CreateShaderModule(device, R"(
             @stage(vertex) fn main() -> @builtin(position) vec4<f32> {
                 return vec4<f32>(0.0, 0.0, 0.0, 1.0);
             })");
 
-        renderPipelineDescriptor.cFragment.module = utils::CreateShaderModule(device, R"(
+    renderPipelineDescriptor.cFragment.module = utils::CreateShaderModule(device, R"(
             @group(0) @binding(0) var sampler0 : sampler;
             @stage(fragment) fn main() -> @location(0) vec4<f32> {
                 _ = sampler0;
                 return vec4<f32>(0.0, 0.0, 0.0, 0.0);
             })");
 
-        wgpu::RenderPipeline renderPipeline =
-            device.CreateRenderPipeline(&renderPipelineDescriptor);
-        utils::BasicRenderPass renderPass = utils::CreateBasicRenderPass(device, kRTSize, kRTSize);
+    wgpu::RenderPipeline renderPipeline = device.CreateRenderPipeline(&renderPipelineDescriptor);
+    utils::BasicRenderPass renderPass = utils::CreateBasicRenderPass(device, kRTSize, kRTSize);
 
-        wgpu::Sampler sampler = device.CreateSampler();
+    wgpu::Sampler sampler = device.CreateSampler();
 
-        Device* d3dDevice = reinterpret_cast<Device*>(device.Get());
-        ShaderVisibleDescriptorAllocator* allocator =
-            d3dDevice->GetSamplerShaderVisibleDescriptorAllocator();
-        const uint64_t samplerHeapSize = allocator->GetShaderVisibleHeapSizeForTesting();
+    Device* d3dDevice = reinterpret_cast<Device*>(device.Get());
+    ShaderVisibleDescriptorAllocator* allocator =
+        d3dDevice->GetSamplerShaderVisibleDescriptorAllocator();
+    const uint64_t samplerHeapSize = allocator->GetShaderVisibleHeapSizeForTesting();
 
-        const HeapVersionID HeapVersionID = allocator->GetShaderVisibleHeapSerialForTesting();
+    const HeapVersionID HeapVersionID = allocator->GetShaderVisibleHeapSerialForTesting();
 
-        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-        {
-            wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass.renderPassInfo);
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+    {
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass.renderPassInfo);
 
-            pass.SetPipeline(renderPipeline);
+        pass.SetPipeline(renderPipeline);
 
-            for (uint32_t i = 0; i < samplerHeapSize + 1; ++i) {
-                pass.SetBindGroup(0,
-                                  utils::MakeBindGroup(device, renderPipeline.GetBindGroupLayout(0),
-                                                       {{0, sampler}}));
-                pass.Draw(3);
-            }
-
-            pass.End();
+        for (uint32_t i = 0; i < samplerHeapSize + 1; ++i) {
+            pass.SetBindGroup(0, utils::MakeBindGroup(device, renderPipeline.GetBindGroupLayout(0),
+                                                      {{0, sampler}}));
+            pass.Draw(3);
         }
 
-        wgpu::CommandBuffer commands = encoder.Finish();
-        queue.Submit(1, &commands);
-
-        EXPECT_EQ(allocator->GetShaderVisibleHeapSerialForTesting(), HeapVersionID);
+        pass.End();
     }
 
-    // Verify shader-visible heaps can be recycled for multiple submits.
-    TEST_P(D3D12DescriptorHeapTests, PoolHeapsInMultipleSubmits) {
-        // Use small heaps to count only pool-allocated switches.
-        DAWN_TEST_UNSUPPORTED_IF(!mD3DDevice->IsToggleEnabled(
-            dawn::native::Toggle::UseD3D12SmallShaderVisibleHeapForTesting));
+    wgpu::CommandBuffer commands = encoder.Finish();
+    queue.Submit(1, &commands);
 
-        ShaderVisibleDescriptorAllocator* allocator =
-            mD3DDevice->GetSamplerShaderVisibleDescriptorAllocator();
+    EXPECT_EQ(allocator->GetShaderVisibleHeapSerialForTesting(), HeapVersionID);
+}
 
-        std::list<ComPtr<ID3D12DescriptorHeap>> heaps = {allocator->GetShaderVisibleHeap()};
+// Verify shader-visible heaps can be recycled for multiple submits.
+TEST_P(D3D12DescriptorHeapTests, PoolHeapsInMultipleSubmits) {
+    // Use small heaps to count only pool-allocated switches.
+    DAWN_TEST_UNSUPPORTED_IF(!mD3DDevice->IsToggleEnabled(
+        dawn::native::Toggle::UseD3D12SmallShaderVisibleHeapForTesting));
 
-        EXPECT_EQ(allocator->GetShaderVisiblePoolSizeForTesting(), 0u);
+    ShaderVisibleDescriptorAllocator* allocator =
+        mD3DDevice->GetSamplerShaderVisibleDescriptorAllocator();
 
-        // Allocate + increment internal serials up to |kFrameDepth| and ensure heaps are always
-        // unique.
-        for (uint32_t i = 0; i < kFrameDepth; i++) {
-            EXPECT_TRUE(allocator->AllocateAndSwitchShaderVisibleHeap().IsSuccess());
-            ComPtr<ID3D12DescriptorHeap> heap = allocator->GetShaderVisibleHeap();
-            EXPECT_TRUE(std::find(heaps.begin(), heaps.end(), heap) == heaps.end());
-            heaps.push_back(heap);
-            // CheckPassedSerials() will update the last internally completed serial.
-            EXPECT_TRUE(mD3DDevice->CheckPassedSerials().IsSuccess());
-            // NextSerial() will increment the last internally submitted serial.
-            EXPECT_TRUE(mD3DDevice->NextSerial().IsSuccess());
-        }
+    std::list<ComPtr<ID3D12DescriptorHeap>> heaps = {allocator->GetShaderVisibleHeap()};
 
-        // Repeat up to |kFrameDepth| again but ensure heaps are the same in the expected order
-        // (oldest heaps are recycled first). The "+ 1" is so we also include the very first heap in
-        // the check.
-        for (uint32_t i = 0; i < kFrameDepth + 1; i++) {
-            EXPECT_TRUE(allocator->AllocateAndSwitchShaderVisibleHeap().IsSuccess());
-            ComPtr<ID3D12DescriptorHeap> heap = allocator->GetShaderVisibleHeap();
-            EXPECT_TRUE(heaps.front() == heap);
-            heaps.pop_front();
-            EXPECT_TRUE(mD3DDevice->CheckPassedSerials().IsSuccess());
-            EXPECT_TRUE(mD3DDevice->NextSerial().IsSuccess());
-        }
+    EXPECT_EQ(allocator->GetShaderVisiblePoolSizeForTesting(), 0u);
 
-        EXPECT_TRUE(heaps.empty());
-        EXPECT_EQ(allocator->GetShaderVisiblePoolSizeForTesting(), kFrameDepth);
+    // Allocate + increment internal serials up to |kFrameDepth| and ensure heaps are always
+    // unique.
+    for (uint32_t i = 0; i < kFrameDepth; i++) {
+        EXPECT_TRUE(allocator->AllocateAndSwitchShaderVisibleHeap().IsSuccess());
+        ComPtr<ID3D12DescriptorHeap> heap = allocator->GetShaderVisibleHeap();
+        EXPECT_TRUE(std::find(heaps.begin(), heaps.end(), heap) == heaps.end());
+        heaps.push_back(heap);
+        // CheckPassedSerials() will update the last internally completed serial.
+        EXPECT_TRUE(mD3DDevice->CheckPassedSerials().IsSuccess());
+        // NextSerial() will increment the last internally submitted serial.
+        EXPECT_TRUE(mD3DDevice->NextSerial().IsSuccess());
     }
 
-    // Verify shader-visible heaps do not recycle in a pending submit.
-    TEST_P(D3D12DescriptorHeapTests, PoolHeapsInPendingSubmit) {
-        // Use small heaps to count only pool-allocated switches.
-        DAWN_TEST_UNSUPPORTED_IF(!mD3DDevice->IsToggleEnabled(
-            dawn::native::Toggle::UseD3D12SmallShaderVisibleHeapForTesting));
-
-        constexpr uint32_t kNumOfSwitches = 5;
-
-        ShaderVisibleDescriptorAllocator* allocator =
-            mD3DDevice->GetSamplerShaderVisibleDescriptorAllocator();
-
-        const HeapVersionID heapSerial = allocator->GetShaderVisibleHeapSerialForTesting();
-
-        std::set<ComPtr<ID3D12DescriptorHeap>> heaps = {allocator->GetShaderVisibleHeap()};
-
-        EXPECT_EQ(allocator->GetShaderVisiblePoolSizeForTesting(), 0u);
-
-        // Switch-over |kNumOfSwitches| and ensure heaps are always unique.
-        for (uint32_t i = 0; i < kNumOfSwitches; i++) {
-            EXPECT_TRUE(allocator->AllocateAndSwitchShaderVisibleHeap().IsSuccess());
-            ComPtr<ID3D12DescriptorHeap> heap = allocator->GetShaderVisibleHeap();
-            EXPECT_TRUE(std::find(heaps.begin(), heaps.end(), heap) == heaps.end());
-            heaps.insert(heap);
-        }
-
-        // After |kNumOfSwitches|, no heaps are recycled.
-        EXPECT_EQ(allocator->GetShaderVisibleHeapSerialForTesting(),
-                  heapSerial + HeapVersionID(kNumOfSwitches));
-        EXPECT_EQ(allocator->GetShaderVisiblePoolSizeForTesting(), kNumOfSwitches);
+    // Repeat up to |kFrameDepth| again but ensure heaps are the same in the expected order
+    // (oldest heaps are recycled first). The "+ 1" is so we also include the very first heap in
+    // the check.
+    for (uint32_t i = 0; i < kFrameDepth + 1; i++) {
+        EXPECT_TRUE(allocator->AllocateAndSwitchShaderVisibleHeap().IsSuccess());
+        ComPtr<ID3D12DescriptorHeap> heap = allocator->GetShaderVisibleHeap();
+        EXPECT_TRUE(heaps.front() == heap);
+        heaps.pop_front();
+        EXPECT_TRUE(mD3DDevice->CheckPassedSerials().IsSuccess());
+        EXPECT_TRUE(mD3DDevice->NextSerial().IsSuccess());
     }
 
-    // Verify switching shader-visible heaps do not recycle in a pending submit but do so
-    // once no longer pending.
-    TEST_P(D3D12DescriptorHeapTests, PoolHeapsInPendingAndMultipleSubmits) {
-        // Use small heaps to count only pool-allocated switches.
-        DAWN_TEST_UNSUPPORTED_IF(!mD3DDevice->IsToggleEnabled(
-            dawn::native::Toggle::UseD3D12SmallShaderVisibleHeapForTesting));
+    EXPECT_TRUE(heaps.empty());
+    EXPECT_EQ(allocator->GetShaderVisiblePoolSizeForTesting(), kFrameDepth);
+}
 
-        constexpr uint32_t kNumOfSwitches = 5;
+// Verify shader-visible heaps do not recycle in a pending submit.
+TEST_P(D3D12DescriptorHeapTests, PoolHeapsInPendingSubmit) {
+    // Use small heaps to count only pool-allocated switches.
+    DAWN_TEST_UNSUPPORTED_IF(!mD3DDevice->IsToggleEnabled(
+        dawn::native::Toggle::UseD3D12SmallShaderVisibleHeapForTesting));
 
-        ShaderVisibleDescriptorAllocator* allocator =
-            mD3DDevice->GetSamplerShaderVisibleDescriptorAllocator();
-        const HeapVersionID heapSerial = allocator->GetShaderVisibleHeapSerialForTesting();
+    constexpr uint32_t kNumOfSwitches = 5;
 
-        std::set<ComPtr<ID3D12DescriptorHeap>> heaps = {allocator->GetShaderVisibleHeap()};
+    ShaderVisibleDescriptorAllocator* allocator =
+        mD3DDevice->GetSamplerShaderVisibleDescriptorAllocator();
 
-        EXPECT_EQ(allocator->GetShaderVisiblePoolSizeForTesting(), 0u);
+    const HeapVersionID heapSerial = allocator->GetShaderVisibleHeapSerialForTesting();
 
-        // Switch-over |kNumOfSwitches| to create a pool of unique heaps.
-        for (uint32_t i = 0; i < kNumOfSwitches; i++) {
-            EXPECT_TRUE(allocator->AllocateAndSwitchShaderVisibleHeap().IsSuccess());
-            ComPtr<ID3D12DescriptorHeap> heap = allocator->GetShaderVisibleHeap();
-            EXPECT_TRUE(std::find(heaps.begin(), heaps.end(), heap) == heaps.end());
-            heaps.insert(heap);
-        }
+    std::set<ComPtr<ID3D12DescriptorHeap>> heaps = {allocator->GetShaderVisibleHeap()};
 
-        EXPECT_EQ(allocator->GetShaderVisibleHeapSerialForTesting(),
-                  heapSerial + HeapVersionID(kNumOfSwitches));
-        EXPECT_EQ(allocator->GetShaderVisiblePoolSizeForTesting(), kNumOfSwitches);
+    EXPECT_EQ(allocator->GetShaderVisiblePoolSizeForTesting(), 0u);
 
-        // Ensure switched-over heaps can be recycled by advancing the GPU by at-least
-        // |kFrameDepth|.
-        for (uint32_t i = 0; i < kFrameDepth; i++) {
-            mD3DDevice->APITick();
-        }
-
-        // Switch-over |kNumOfSwitches| again reusing the same heaps.
-        for (uint32_t i = 0; i < kNumOfSwitches; i++) {
-            EXPECT_TRUE(allocator->AllocateAndSwitchShaderVisibleHeap().IsSuccess());
-            ComPtr<ID3D12DescriptorHeap> heap = allocator->GetShaderVisibleHeap();
-            EXPECT_TRUE(std::find(heaps.begin(), heaps.end(), heap) != heaps.end());
-            heaps.erase(heap);
-        }
-
-        // After switching-over |kNumOfSwitches| x 2, ensure no additional heaps exist.
-        EXPECT_EQ(allocator->GetShaderVisibleHeapSerialForTesting(),
-                  heapSerial + HeapVersionID(kNumOfSwitches * 2));
-        EXPECT_EQ(allocator->GetShaderVisiblePoolSizeForTesting(), kNumOfSwitches);
+    // Switch-over |kNumOfSwitches| and ensure heaps are always unique.
+    for (uint32_t i = 0; i < kNumOfSwitches; i++) {
+        EXPECT_TRUE(allocator->AllocateAndSwitchShaderVisibleHeap().IsSuccess());
+        ComPtr<ID3D12DescriptorHeap> heap = allocator->GetShaderVisibleHeap();
+        EXPECT_TRUE(std::find(heaps.begin(), heaps.end(), heap) == heaps.end());
+        heaps.insert(heap);
     }
 
-    // Verify shader-visible heaps do not recycle in multiple submits.
-    TEST_P(D3D12DescriptorHeapTests, GrowHeapsInMultipleSubmits) {
-        ShaderVisibleDescriptorAllocator* allocator =
-            mD3DDevice->GetSamplerShaderVisibleDescriptorAllocator();
+    // After |kNumOfSwitches|, no heaps are recycled.
+    EXPECT_EQ(allocator->GetShaderVisibleHeapSerialForTesting(),
+              heapSerial + HeapVersionID(kNumOfSwitches));
+    EXPECT_EQ(allocator->GetShaderVisiblePoolSizeForTesting(), kNumOfSwitches);
+}
 
-        const HeapVersionID heapSerial = allocator->GetShaderVisibleHeapSerialForTesting();
+// Verify switching shader-visible heaps do not recycle in a pending submit but do so
+// once no longer pending.
+TEST_P(D3D12DescriptorHeapTests, PoolHeapsInPendingAndMultipleSubmits) {
+    // Use small heaps to count only pool-allocated switches.
+    DAWN_TEST_UNSUPPORTED_IF(!mD3DDevice->IsToggleEnabled(
+        dawn::native::Toggle::UseD3D12SmallShaderVisibleHeapForTesting));
 
-        std::set<ComPtr<ID3D12DescriptorHeap>> heaps = {allocator->GetShaderVisibleHeap()};
+    constexpr uint32_t kNumOfSwitches = 5;
 
-        EXPECT_EQ(allocator->GetShaderVisiblePoolSizeForTesting(), 0u);
+    ShaderVisibleDescriptorAllocator* allocator =
+        mD3DDevice->GetSamplerShaderVisibleDescriptorAllocator();
+    const HeapVersionID heapSerial = allocator->GetShaderVisibleHeapSerialForTesting();
 
-        // Growth: Allocate + Tick() and ensure heaps are always unique.
-        while (allocator->GetShaderVisiblePoolSizeForTesting() == 0) {
-            EXPECT_TRUE(allocator->AllocateAndSwitchShaderVisibleHeap().IsSuccess());
-            ComPtr<ID3D12DescriptorHeap> heap = allocator->GetShaderVisibleHeap();
-            EXPECT_TRUE(std::find(heaps.begin(), heaps.end(), heap) == heaps.end());
-            heaps.insert(heap);
-            mD3DDevice->APITick();
-        }
+    std::set<ComPtr<ID3D12DescriptorHeap>> heaps = {allocator->GetShaderVisibleHeap()};
 
-        // Verify the number of switches equals the size of heaps allocated (minus the initial).
-        EXPECT_EQ(allocator->GetShaderVisiblePoolSizeForTesting(), 1u);
-        EXPECT_EQ(allocator->GetShaderVisibleHeapSerialForTesting(),
-                  heapSerial + HeapVersionID(heaps.size() - 1));
+    EXPECT_EQ(allocator->GetShaderVisiblePoolSizeForTesting(), 0u);
+
+    // Switch-over |kNumOfSwitches| to create a pool of unique heaps.
+    for (uint32_t i = 0; i < kNumOfSwitches; i++) {
+        EXPECT_TRUE(allocator->AllocateAndSwitchShaderVisibleHeap().IsSuccess());
+        ComPtr<ID3D12DescriptorHeap> heap = allocator->GetShaderVisibleHeap();
+        EXPECT_TRUE(std::find(heaps.begin(), heaps.end(), heap) == heaps.end());
+        heaps.insert(heap);
     }
 
-    // Verify shader-visible heaps do not recycle in a pending submit.
-    TEST_P(D3D12DescriptorHeapTests, GrowHeapsInPendingSubmit) {
-        ShaderVisibleDescriptorAllocator* allocator =
-            mD3DDevice->GetSamplerShaderVisibleDescriptorAllocator();
+    EXPECT_EQ(allocator->GetShaderVisibleHeapSerialForTesting(),
+              heapSerial + HeapVersionID(kNumOfSwitches));
+    EXPECT_EQ(allocator->GetShaderVisiblePoolSizeForTesting(), kNumOfSwitches);
 
-        const HeapVersionID heapSerial = allocator->GetShaderVisibleHeapSerialForTesting();
-
-        std::set<ComPtr<ID3D12DescriptorHeap>> heaps = {allocator->GetShaderVisibleHeap()};
-
-        EXPECT_EQ(allocator->GetShaderVisiblePoolSizeForTesting(), 0u);
-
-        // Growth: Allocate new heaps.
-        while (allocator->GetShaderVisiblePoolSizeForTesting() == 0) {
-            EXPECT_TRUE(allocator->AllocateAndSwitchShaderVisibleHeap().IsSuccess());
-            ComPtr<ID3D12DescriptorHeap> heap = allocator->GetShaderVisibleHeap();
-            EXPECT_TRUE(std::find(heaps.begin(), heaps.end(), heap) == heaps.end());
-            heaps.insert(heap);
-        }
-
-        // Verify the number of switches equals the size of heaps allocated (minus the initial).
-        EXPECT_EQ(allocator->GetShaderVisiblePoolSizeForTesting(), 1u);
-        EXPECT_EQ(allocator->GetShaderVisibleHeapSerialForTesting(),
-                  heapSerial + HeapVersionID(heaps.size() - 1));
+    // Ensure switched-over heaps can be recycled by advancing the GPU by at least
+    // |kFrameDepth|.
+    for (uint32_t i = 0; i < kFrameDepth; i++) {
+        mD3DDevice->APITick();
     }
 
-    // Verify switching shader-visible heaps do not recycle in a pending submit but do so
-    // once no longer pending.
-    // Switches over many times until |kNumOfPooledHeaps| heaps are pool-allocated.
-    TEST_P(D3D12DescriptorHeapTests, GrowAndPoolHeapsInPendingAndMultipleSubmits) {
-        ShaderVisibleDescriptorAllocator* allocator =
-            mD3DDevice->GetSamplerShaderVisibleDescriptorAllocator();
-
-        std::set<ComPtr<ID3D12DescriptorHeap>> heaps = {allocator->GetShaderVisibleHeap()};
-
-        EXPECT_EQ(allocator->GetShaderVisiblePoolSizeForTesting(), 0u);
-
-        uint32_t kNumOfPooledHeaps = 5;
-        while (allocator->GetShaderVisiblePoolSizeForTesting() < kNumOfPooledHeaps) {
-            EXPECT_TRUE(allocator->AllocateAndSwitchShaderVisibleHeap().IsSuccess());
-            ComPtr<ID3D12DescriptorHeap> heap = allocator->GetShaderVisibleHeap();
-            EXPECT_TRUE(std::find(heaps.begin(), heaps.end(), heap) == heaps.end());
-            heaps.insert(heap);
-        }
-
-        EXPECT_EQ(allocator->GetShaderVisiblePoolSizeForTesting(), kNumOfPooledHeaps);
-
-        // Ensure switched-over heaps can be recycled by advancing the GPU by at-least
-        // |kFrameDepth|.
-        for (uint32_t i = 0; i < kFrameDepth; i++) {
-            mD3DDevice->APITick();
-        }
-
-        // Switch-over the pool-allocated heaps.
-        for (uint32_t i = 0; i < kNumOfPooledHeaps; i++) {
-            EXPECT_TRUE(allocator->AllocateAndSwitchShaderVisibleHeap().IsSuccess());
-            ComPtr<ID3D12DescriptorHeap> heap = allocator->GetShaderVisibleHeap();
-            EXPECT_FALSE(std::find(heaps.begin(), heaps.end(), heap) == heaps.end());
-        }
-
-        EXPECT_EQ(allocator->GetShaderVisiblePoolSizeForTesting(), kNumOfPooledHeaps);
+    // Switch-over |kNumOfSwitches| again reusing the same heaps.
+    for (uint32_t i = 0; i < kNumOfSwitches; i++) {
+        EXPECT_TRUE(allocator->AllocateAndSwitchShaderVisibleHeap().IsSuccess());
+        ComPtr<ID3D12DescriptorHeap> heap = allocator->GetShaderVisibleHeap();
+        EXPECT_TRUE(std::find(heaps.begin(), heaps.end(), heap) != heaps.end());
+        heaps.erase(heap);
     }
 
-    // Verify encoding multiple heaps worth of bindgroups.
-    // Shader-visible heaps will switch out |kNumOfHeaps| times.
-    TEST_P(D3D12DescriptorHeapTests, EncodeManyUBO) {
-        // This test draws a solid color triangle |heapSize| times. Each draw uses a new bindgroup
-        // that has its own UBO with a "color value" in the range [1... heapSize]. After |heapSize|
-        // draws, the result is the arithmetic sum of the sequence after the framebuffer is blended
-        // by accumulation. By checking for this sum, we ensure each bindgroup was encoded
-        // correctly.
-        DAWN_TEST_UNSUPPORTED_IF(!mD3DDevice->IsToggleEnabled(
-            dawn::native::Toggle::UseD3D12SmallShaderVisibleHeapForTesting));
+    // After switching-over |kNumOfSwitches| x 2, ensure no additional heaps exist.
+    EXPECT_EQ(allocator->GetShaderVisibleHeapSerialForTesting(),
+              heapSerial + HeapVersionID(kNumOfSwitches * 2));
+    EXPECT_EQ(allocator->GetShaderVisiblePoolSizeForTesting(), kNumOfSwitches);
+}
 
-        utils::BasicRenderPass renderPass =
-            MakeRenderPass(kRTSize, kRTSize, wgpu::TextureFormat::R16Float);
+// Verify shader-visible heaps do not recycle in multiple submits.
+TEST_P(D3D12DescriptorHeapTests, GrowHeapsInMultipleSubmits) {
+    ShaderVisibleDescriptorAllocator* allocator =
+        mD3DDevice->GetSamplerShaderVisibleDescriptorAllocator();
 
-        utils::ComboRenderPipelineDescriptor pipelineDescriptor;
-        pipelineDescriptor.vertex.module = mSimpleVSModule;
+    const HeapVersionID heapSerial = allocator->GetShaderVisibleHeapSerialForTesting();
 
-        pipelineDescriptor.cFragment.module = utils::CreateShaderModule(device, R"(
+    std::set<ComPtr<ID3D12DescriptorHeap>> heaps = {allocator->GetShaderVisibleHeap()};
+
+    EXPECT_EQ(allocator->GetShaderVisiblePoolSizeForTesting(), 0u);
+
+    // Growth: Allocate + Tick() and ensure heaps are always unique.
+    while (allocator->GetShaderVisiblePoolSizeForTesting() == 0) {
+        EXPECT_TRUE(allocator->AllocateAndSwitchShaderVisibleHeap().IsSuccess());
+        ComPtr<ID3D12DescriptorHeap> heap = allocator->GetShaderVisibleHeap();
+        EXPECT_TRUE(std::find(heaps.begin(), heaps.end(), heap) == heaps.end());
+        heaps.insert(heap);
+        mD3DDevice->APITick();
+    }
+
+    // Verify the number of switches equals the size of heaps allocated (minus the initial).
+    EXPECT_EQ(allocator->GetShaderVisiblePoolSizeForTesting(), 1u);
+    EXPECT_EQ(allocator->GetShaderVisibleHeapSerialForTesting(),
+              heapSerial + HeapVersionID(heaps.size() - 1));
+}
+
+// Verify shader-visible heaps do not recycle in a pending submit.
+TEST_P(D3D12DescriptorHeapTests, GrowHeapsInPendingSubmit) {
+    ShaderVisibleDescriptorAllocator* allocator =
+        mD3DDevice->GetSamplerShaderVisibleDescriptorAllocator();
+
+    const HeapVersionID heapSerial = allocator->GetShaderVisibleHeapSerialForTesting();
+
+    std::set<ComPtr<ID3D12DescriptorHeap>> heaps = {allocator->GetShaderVisibleHeap()};
+
+    EXPECT_EQ(allocator->GetShaderVisiblePoolSizeForTesting(), 0u);
+
+    // Growth: Allocate new heaps.
+    while (allocator->GetShaderVisiblePoolSizeForTesting() == 0) {
+        EXPECT_TRUE(allocator->AllocateAndSwitchShaderVisibleHeap().IsSuccess());
+        ComPtr<ID3D12DescriptorHeap> heap = allocator->GetShaderVisibleHeap();
+        EXPECT_TRUE(std::find(heaps.begin(), heaps.end(), heap) == heaps.end());
+        heaps.insert(heap);
+    }
+
+    // Verify the number of switches equals the size of heaps allocated (minus the initial).
+    EXPECT_EQ(allocator->GetShaderVisiblePoolSizeForTesting(), 1u);
+    EXPECT_EQ(allocator->GetShaderVisibleHeapSerialForTesting(),
+              heapSerial + HeapVersionID(heaps.size() - 1));
+}
+
+// Verify switching shader-visible heaps do not recycle in a pending submit but do so
+// once no longer pending.
+// Switches over many times until |kNumOfPooledHeaps| heaps are pool-allocated.
+TEST_P(D3D12DescriptorHeapTests, GrowAndPoolHeapsInPendingAndMultipleSubmits) {
+    ShaderVisibleDescriptorAllocator* allocator =
+        mD3DDevice->GetSamplerShaderVisibleDescriptorAllocator();
+
+    std::set<ComPtr<ID3D12DescriptorHeap>> heaps = {allocator->GetShaderVisibleHeap()};
+
+    EXPECT_EQ(allocator->GetShaderVisiblePoolSizeForTesting(), 0u);
+
+    uint32_t kNumOfPooledHeaps = 5;
+    while (allocator->GetShaderVisiblePoolSizeForTesting() < kNumOfPooledHeaps) {
+        EXPECT_TRUE(allocator->AllocateAndSwitchShaderVisibleHeap().IsSuccess());
+        ComPtr<ID3D12DescriptorHeap> heap = allocator->GetShaderVisibleHeap();
+        EXPECT_TRUE(std::find(heaps.begin(), heaps.end(), heap) == heaps.end());
+        heaps.insert(heap);
+    }
+
+    EXPECT_EQ(allocator->GetShaderVisiblePoolSizeForTesting(), kNumOfPooledHeaps);
+
+    // Ensure switched-over heaps can be recycled by advancing the GPU by at least
+    // |kFrameDepth|.
+    for (uint32_t i = 0; i < kFrameDepth; i++) {
+        mD3DDevice->APITick();
+    }
+
+    // Switch-over the pool-allocated heaps.
+    for (uint32_t i = 0; i < kNumOfPooledHeaps; i++) {
+        EXPECT_TRUE(allocator->AllocateAndSwitchShaderVisibleHeap().IsSuccess());
+        ComPtr<ID3D12DescriptorHeap> heap = allocator->GetShaderVisibleHeap();
+        EXPECT_FALSE(std::find(heaps.begin(), heaps.end(), heap) == heaps.end());
+    }
+
+    EXPECT_EQ(allocator->GetShaderVisiblePoolSizeForTesting(), kNumOfPooledHeaps);
+}
+
+// Verify encoding multiple heaps' worth of bindgroups.
+// Shader-visible heaps will switch out |kNumOfHeaps| times.
+TEST_P(D3D12DescriptorHeapTests, EncodeManyUBO) {
+    // This test draws a solid color triangle |heapSize| times. Each draw uses a new bindgroup
+    // that has its own UBO with a "color value" in the range [1... heapSize]. After |heapSize|
+    // draws, the result is the arithmetic sum of the sequence after the framebuffer is blended
+    // by accumulation. By checking for this sum, we ensure each bindgroup was encoded
+    // correctly.
+    DAWN_TEST_UNSUPPORTED_IF(!mD3DDevice->IsToggleEnabled(
+        dawn::native::Toggle::UseD3D12SmallShaderVisibleHeapForTesting));
+
+    utils::BasicRenderPass renderPass =
+        MakeRenderPass(kRTSize, kRTSize, wgpu::TextureFormat::R16Float);
+
+    utils::ComboRenderPipelineDescriptor pipelineDescriptor;
+    pipelineDescriptor.vertex.module = mSimpleVSModule;
+
+    pipelineDescriptor.cFragment.module = utils::CreateShaderModule(device, R"(
         struct U {
             heapSize : f32
         }
@@ -464,42 +457,130 @@
             return vec4<f32>(buffer0.heapSize, 0.0, 0.0, 1.0);
         })");
 
-        wgpu::BlendState blend;
-        blend.color.operation = wgpu::BlendOperation::Add;
-        blend.color.srcFactor = wgpu::BlendFactor::One;
-        blend.color.dstFactor = wgpu::BlendFactor::One;
-        blend.alpha.operation = wgpu::BlendOperation::Add;
-        blend.alpha.srcFactor = wgpu::BlendFactor::One;
-        blend.alpha.dstFactor = wgpu::BlendFactor::One;
+    wgpu::BlendState blend;
+    blend.color.operation = wgpu::BlendOperation::Add;
+    blend.color.srcFactor = wgpu::BlendFactor::One;
+    blend.color.dstFactor = wgpu::BlendFactor::One;
+    blend.alpha.operation = wgpu::BlendOperation::Add;
+    blend.alpha.srcFactor = wgpu::BlendFactor::One;
+    blend.alpha.dstFactor = wgpu::BlendFactor::One;
 
-        pipelineDescriptor.cTargets[0].format = wgpu::TextureFormat::R16Float;
-        pipelineDescriptor.cTargets[0].blend = &blend;
+    pipelineDescriptor.cTargets[0].format = wgpu::TextureFormat::R16Float;
+    pipelineDescriptor.cTargets[0].blend = &blend;
 
-        wgpu::RenderPipeline renderPipeline = device.CreateRenderPipeline(&pipelineDescriptor);
+    wgpu::RenderPipeline renderPipeline = device.CreateRenderPipeline(&pipelineDescriptor);
 
-        const uint32_t heapSize = mD3DDevice->GetViewShaderVisibleDescriptorAllocator()
+    const uint32_t heapSize =
+        mD3DDevice->GetViewShaderVisibleDescriptorAllocator()->GetShaderVisibleHeapSizeForTesting();
+
+    constexpr uint32_t kNumOfHeaps = 2;
+
+    const uint32_t numOfEncodedBindGroups = kNumOfHeaps * heapSize;
+
+    std::vector<wgpu::BindGroup> bindGroups;
+    for (uint32_t i = 0; i < numOfEncodedBindGroups; i++) {
+        const float color = i + 1;
+        wgpu::Buffer uniformBuffer =
+            utils::CreateBufferFromData(device, &color, sizeof(color), wgpu::BufferUsage::Uniform);
+        bindGroups.push_back(utils::MakeBindGroup(device, renderPipeline.GetBindGroupLayout(0),
+                                                  {{0, uniformBuffer}}));
+    }
+
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+    {
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass.renderPassInfo);
+
+        pass.SetPipeline(renderPipeline);
+
+        for (uint32_t i = 0; i < numOfEncodedBindGroups; ++i) {
+            pass.SetBindGroup(0, bindGroups[i]);
+            pass.Draw(3);
+        }
+
+        pass.End();
+    }
+
+    wgpu::CommandBuffer commands = encoder.Finish();
+    queue.Submit(1, &commands);
+
+    float colorSum = numOfEncodedBindGroups * (numOfEncodedBindGroups + 1) / 2;
+    EXPECT_PIXEL_FLOAT16_EQ(colorSum, renderPass.color, 0, 0);
+}
+
+// Verify encoding one bindgroup then a heap's worth in different submits.
+// Shader-visible heaps should switch out once upon encoding 1 + |heapSize| descriptors.
+// The first descriptor's memory will be reused when the second submit encodes |heapSize|
+// descriptors.
+TEST_P(D3D12DescriptorHeapTests, EncodeUBOOverflowMultipleSubmit) {
+    DAWN_TEST_UNSUPPORTED_IF(!mD3DDevice->IsToggleEnabled(
+        dawn::native::Toggle::UseD3D12SmallShaderVisibleHeapForTesting));
+
+    // TODO(crbug.com/dawn/742): Test output is wrong with D3D12 + WARP.
+    DAWN_SUPPRESS_TEST_IF(IsD3D12() && IsWARP());
+
+    utils::BasicRenderPass renderPass = utils::CreateBasicRenderPass(device, kRTSize, kRTSize);
+
+    utils::ComboRenderPipelineDescriptor pipelineDescriptor;
+    pipelineDescriptor.vertex.module = mSimpleVSModule;
+    pipelineDescriptor.cFragment.module = mSimpleFSModule;
+    pipelineDescriptor.cTargets[0].format = renderPass.colorFormat;
+
+    wgpu::RenderPipeline renderPipeline = device.CreateRenderPipeline(&pipelineDescriptor);
+
+    // Encode the first descriptor and submit.
+    {
+        std::array<float, 4> greenColor = {0, 1, 0, 1};
+        wgpu::Buffer uniformBuffer = utils::CreateBufferFromData(
+            device, &greenColor, sizeof(greenColor), wgpu::BufferUsage::Uniform);
+
+        wgpu::BindGroup bindGroup = utils::MakeBindGroup(
+            device, renderPipeline.GetBindGroupLayout(0), {{0, uniformBuffer}});
+
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        {
+            wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass.renderPassInfo);
+
+            pass.SetPipeline(renderPipeline);
+            pass.SetBindGroup(0, bindGroup);
+            pass.Draw(3);
+            pass.End();
+        }
+
+        wgpu::CommandBuffer commands = encoder.Finish();
+        queue.Submit(1, &commands);
+    }
+
+    EXPECT_PIXEL_RGBA8_EQ(RGBA8::kGreen, renderPass.color, 0, 0);
+
+    // Encode a heap's worth of descriptors.
+    {
+        const uint32_t heapSize = mD3DDevice->GetSamplerShaderVisibleDescriptorAllocator()
                                       ->GetShaderVisibleHeapSizeForTesting();
 
-        constexpr uint32_t kNumOfHeaps = 2;
-
-        const uint32_t numOfEncodedBindGroups = kNumOfHeaps * heapSize;
-
         std::vector<wgpu::BindGroup> bindGroups;
-        for (uint32_t i = 0; i < numOfEncodedBindGroups; i++) {
-            const float color = i + 1;
-            wgpu::Buffer uniformBuffer = utils::CreateBufferFromData(device, &color, sizeof(color),
-                                                                     wgpu::BufferUsage::Uniform);
+        for (uint32_t i = 0; i < heapSize - 1; i++) {
+            std::array<float, 4> fillColor = GetSolidColor(i + 1);  // Avoid black
+            wgpu::Buffer uniformBuffer = utils::CreateBufferFromData(
+                device, &fillColor, sizeof(fillColor), wgpu::BufferUsage::Uniform);
+
             bindGroups.push_back(utils::MakeBindGroup(device, renderPipeline.GetBindGroupLayout(0),
                                                       {{0, uniformBuffer}}));
         }
 
+        std::array<float, 4> redColor = {1, 0, 0, 1};
+        wgpu::Buffer lastUniformBuffer = utils::CreateBufferFromData(
+            device, &redColor, sizeof(redColor), wgpu::BufferUsage::Uniform);
+
+        bindGroups.push_back(utils::MakeBindGroup(device, renderPipeline.GetBindGroupLayout(0),
+                                                  {{0, lastUniformBuffer, 0, sizeof(redColor)}}));
+
         wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
         {
             wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass.renderPassInfo);
 
             pass.SetPipeline(renderPipeline);
 
-            for (uint32_t i = 0; i < numOfEncodedBindGroups; ++i) {
+            for (uint32_t i = 0; i < heapSize; ++i) {
                 pass.SetBindGroup(0, bindGroups[i]);
                 pass.Draw(3);
             }
@@ -509,135 +590,133 @@
 
         wgpu::CommandBuffer commands = encoder.Finish();
         queue.Submit(1, &commands);
-
-        float colorSum = numOfEncodedBindGroups * (numOfEncodedBindGroups + 1) / 2;
-        EXPECT_PIXEL_FLOAT16_EQ(colorSum, renderPass.color, 0, 0);
     }
 
-    // Verify encoding one bindgroup then a heaps worth in different submits.
-    // Shader-visible heaps should switch out once upon encoding 1 + |heapSize| descriptors.
-    // The first descriptor's memory will be reused when the second submit encodes |heapSize|
-    // descriptors.
-    TEST_P(D3D12DescriptorHeapTests, EncodeUBOOverflowMultipleSubmit) {
-        DAWN_TEST_UNSUPPORTED_IF(!mD3DDevice->IsToggleEnabled(
-            dawn::native::Toggle::UseD3D12SmallShaderVisibleHeapForTesting));
+    EXPECT_PIXEL_RGBA8_EQ(RGBA8::kRed, renderPass.color, 0, 0);
+}
 
-        // TODO(crbug.com/dawn/742): Test output is wrong with D3D12 + WARP.
-        DAWN_SUPPRESS_TEST_IF(IsD3D12() && IsWARP());
+// Verify encoding a heaps worth of bindgroups plus one more then reuse the first
+// bindgroup in the same submit.
+// Shader-visible heaps should switch out once then re-encode the first descriptor at a new
+// offset in the heap.
+TEST_P(D3D12DescriptorHeapTests, EncodeReuseUBOOverflow) {
+    DAWN_TEST_UNSUPPORTED_IF(!mD3DDevice->IsToggleEnabled(
+        dawn::native::Toggle::UseD3D12SmallShaderVisibleHeapForTesting));
 
-        utils::BasicRenderPass renderPass = utils::CreateBasicRenderPass(device, kRTSize, kRTSize);
+    utils::BasicRenderPass renderPass = utils::CreateBasicRenderPass(device, kRTSize, kRTSize);
 
-        utils::ComboRenderPipelineDescriptor pipelineDescriptor;
-        pipelineDescriptor.vertex.module = mSimpleVSModule;
-        pipelineDescriptor.cFragment.module = mSimpleFSModule;
-        pipelineDescriptor.cTargets[0].format = renderPass.colorFormat;
+    utils::ComboRenderPipelineDescriptor pipelineDescriptor;
+    pipelineDescriptor.vertex.module = mSimpleVSModule;
+    pipelineDescriptor.cFragment.module = mSimpleFSModule;
+    pipelineDescriptor.cTargets[0].format = renderPass.colorFormat;
 
-        wgpu::RenderPipeline renderPipeline = device.CreateRenderPipeline(&pipelineDescriptor);
+    wgpu::RenderPipeline pipeline = device.CreateRenderPipeline(&pipelineDescriptor);
 
-        // Encode the first descriptor and submit.
+    std::array<float, 4> redColor = {1, 0, 0, 1};
+    wgpu::Buffer firstUniformBuffer = utils::CreateBufferFromData(
+        device, &redColor, sizeof(redColor), wgpu::BufferUsage::Uniform);
+
+    std::vector<wgpu::BindGroup> bindGroups = {utils::MakeBindGroup(
+        device, pipeline.GetBindGroupLayout(0), {{0, firstUniformBuffer, 0, sizeof(redColor)}})};
+
+    const uint32_t heapSize =
+        mD3DDevice->GetViewShaderVisibleDescriptorAllocator()->GetShaderVisibleHeapSizeForTesting();
+
+    for (uint32_t i = 0; i < heapSize; i++) {
+        const std::array<float, 4>& fillColor = GetSolidColor(i + 1);  // Avoid black
+        wgpu::Buffer uniformBuffer = utils::CreateBufferFromData(
+            device, &fillColor, sizeof(fillColor), wgpu::BufferUsage::Uniform);
+        bindGroups.push_back(utils::MakeBindGroup(device, pipeline.GetBindGroupLayout(0),
+                                                  {{0, uniformBuffer, 0, sizeof(fillColor)}}));
+    }
+
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+    {
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass.renderPassInfo);
+
+        pass.SetPipeline(pipeline);
+
+        // Encode a heap worth of descriptors plus one more.
+        for (uint32_t i = 0; i < heapSize + 1; ++i) {
+            pass.SetBindGroup(0, bindGroups[i]);
+            pass.Draw(3);
+        }
+
+        // Re-encode the first bindgroup again.
+        pass.SetBindGroup(0, bindGroups[0]);
+        pass.Draw(3);
+
+        pass.End();
+    }
+
+    wgpu::CommandBuffer commands = encoder.Finish();
+    queue.Submit(1, &commands);
+
+    // Make sure the first bindgroup was encoded correctly.
+    EXPECT_PIXEL_RGBA8_EQ(RGBA8::kRed, renderPass.color, 0, 0);
+}
+
+// Verify encoding a heaps worth of bindgroups plus one more in the first submit then reuse the
+// first bindgroup again in the second submit.
+// Shader-visible heaps should switch out once then re-encode the
+// first descriptor at the same offset in the heap.
+TEST_P(D3D12DescriptorHeapTests, EncodeReuseUBOMultipleSubmits) {
+    DAWN_TEST_UNSUPPORTED_IF(!mD3DDevice->IsToggleEnabled(
+        dawn::native::Toggle::UseD3D12SmallShaderVisibleHeapForTesting));
+
+    utils::BasicRenderPass renderPass = utils::CreateBasicRenderPass(device, kRTSize, kRTSize);
+
+    utils::ComboRenderPipelineDescriptor pipelineDescriptor;
+    pipelineDescriptor.vertex.module = mSimpleVSModule;
+    pipelineDescriptor.cFragment.module = mSimpleFSModule;
+    pipelineDescriptor.cTargets[0].format = renderPass.colorFormat;
+
+    wgpu::RenderPipeline pipeline = device.CreateRenderPipeline(&pipelineDescriptor);
+
+    // Encode heap worth of descriptors plus one more.
+    std::array<float, 4> redColor = {1, 0, 0, 1};
+
+    wgpu::Buffer firstUniformBuffer = utils::CreateBufferFromData(
+        device, &redColor, sizeof(redColor), wgpu::BufferUsage::Uniform);
+
+    std::vector<wgpu::BindGroup> bindGroups = {utils::MakeBindGroup(
+        device, pipeline.GetBindGroupLayout(0), {{0, firstUniformBuffer, 0, sizeof(redColor)}})};
+
+    const uint32_t heapSize =
+        mD3DDevice->GetViewShaderVisibleDescriptorAllocator()->GetShaderVisibleHeapSizeForTesting();
+
+    for (uint32_t i = 0; i < heapSize; i++) {
+        std::array<float, 4> fillColor = GetSolidColor(i + 1);  // Avoid black
+        wgpu::Buffer uniformBuffer = utils::CreateBufferFromData(
+            device, &fillColor, sizeof(fillColor), wgpu::BufferUsage::Uniform);
+
+        bindGroups.push_back(utils::MakeBindGroup(device, pipeline.GetBindGroupLayout(0),
+                                                  {{0, uniformBuffer, 0, sizeof(fillColor)}}));
+    }
+
+    {
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
         {
-            std::array<float, 4> greenColor = {0, 1, 0, 1};
-            wgpu::Buffer uniformBuffer = utils::CreateBufferFromData(
-                device, &greenColor, sizeof(greenColor), wgpu::BufferUsage::Uniform);
+            wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass.renderPassInfo);
 
-            wgpu::BindGroup bindGroup = utils::MakeBindGroup(
-                device, renderPipeline.GetBindGroupLayout(0), {{0, uniformBuffer}});
+            pass.SetPipeline(pipeline);
 
-            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-            {
-                wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass.renderPassInfo);
-
-                pass.SetPipeline(renderPipeline);
-                pass.SetBindGroup(0, bindGroup);
+            for (uint32_t i = 0; i < heapSize + 1; ++i) {
+                pass.SetBindGroup(0, bindGroups[i]);
                 pass.Draw(3);
-                pass.End();
             }
 
-            wgpu::CommandBuffer commands = encoder.Finish();
-            queue.Submit(1, &commands);
+            pass.End();
         }
 
-        EXPECT_PIXEL_RGBA8_EQ(RGBA8::kGreen, renderPass.color, 0, 0);
-
-        // Encode a heap worth of descriptors.
-        {
-            const uint32_t heapSize = mD3DDevice->GetSamplerShaderVisibleDescriptorAllocator()
-                                          ->GetShaderVisibleHeapSizeForTesting();
-
-            std::vector<wgpu::BindGroup> bindGroups;
-            for (uint32_t i = 0; i < heapSize - 1; i++) {
-                std::array<float, 4> fillColor = GetSolidColor(i + 1);  // Avoid black
-                wgpu::Buffer uniformBuffer = utils::CreateBufferFromData(
-                    device, &fillColor, sizeof(fillColor), wgpu::BufferUsage::Uniform);
-
-                bindGroups.push_back(utils::MakeBindGroup(
-                    device, renderPipeline.GetBindGroupLayout(0), {{0, uniformBuffer}}));
-            }
-
-            std::array<float, 4> redColor = {1, 0, 0, 1};
-            wgpu::Buffer lastUniformBuffer = utils::CreateBufferFromData(
-                device, &redColor, sizeof(redColor), wgpu::BufferUsage::Uniform);
-
-            bindGroups.push_back(
-                utils::MakeBindGroup(device, renderPipeline.GetBindGroupLayout(0),
-                                     {{0, lastUniformBuffer, 0, sizeof(redColor)}}));
-
-            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-            {
-                wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass.renderPassInfo);
-
-                pass.SetPipeline(renderPipeline);
-
-                for (uint32_t i = 0; i < heapSize; ++i) {
-                    pass.SetBindGroup(0, bindGroups[i]);
-                    pass.Draw(3);
-                }
-
-                pass.End();
-            }
-
-            wgpu::CommandBuffer commands = encoder.Finish();
-            queue.Submit(1, &commands);
-        }
-
-        EXPECT_PIXEL_RGBA8_EQ(RGBA8::kRed, renderPass.color, 0, 0);
+        wgpu::CommandBuffer commands = encoder.Finish();
+        queue.Submit(1, &commands);
     }
 
-    // Verify encoding a heaps worth of bindgroups plus one more then reuse the first
-    // bindgroup in the same submit.
-    // Shader-visible heaps should switch out once then re-encode the first descriptor at a new
-    // offset in the heap.
-    TEST_P(D3D12DescriptorHeapTests, EncodeReuseUBOOverflow) {
-        DAWN_TEST_UNSUPPORTED_IF(!mD3DDevice->IsToggleEnabled(
-            dawn::native::Toggle::UseD3D12SmallShaderVisibleHeapForTesting));
-
-        utils::BasicRenderPass renderPass = utils::CreateBasicRenderPass(device, kRTSize, kRTSize);
-
-        utils::ComboRenderPipelineDescriptor pipelineDescriptor;
-        pipelineDescriptor.vertex.module = mSimpleVSModule;
-        pipelineDescriptor.cFragment.module = mSimpleFSModule;
-        pipelineDescriptor.cTargets[0].format = renderPass.colorFormat;
-
-        wgpu::RenderPipeline pipeline = device.CreateRenderPipeline(&pipelineDescriptor);
-
-        std::array<float, 4> redColor = {1, 0, 0, 1};
-        wgpu::Buffer firstUniformBuffer = utils::CreateBufferFromData(
-            device, &redColor, sizeof(redColor), wgpu::BufferUsage::Uniform);
-
-        std::vector<wgpu::BindGroup> bindGroups = {
-            utils::MakeBindGroup(device, pipeline.GetBindGroupLayout(0),
-                                 {{0, firstUniformBuffer, 0, sizeof(redColor)}})};
-
-        const uint32_t heapSize = mD3DDevice->GetViewShaderVisibleDescriptorAllocator()
-                                      ->GetShaderVisibleHeapSizeForTesting();
-
-        for (uint32_t i = 0; i < heapSize; i++) {
-            const std::array<float, 4>& fillColor = GetSolidColor(i + 1);  // Avoid black
-            wgpu::Buffer uniformBuffer = utils::CreateBufferFromData(
-                device, &fillColor, sizeof(fillColor), wgpu::BufferUsage::Uniform);
-            bindGroups.push_back(utils::MakeBindGroup(device, pipeline.GetBindGroupLayout(0),
-                                                      {{0, uniformBuffer, 0, sizeof(fillColor)}}));
-        }
+    // Re-encode the first bindgroup again.
+    {
+        std::array<float, 4> greenColor = {0, 1, 0, 1};
+        queue.WriteBuffer(firstUniformBuffer, 0, &greenColor, sizeof(greenColor));
 
         wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
         {
@@ -645,13 +724,6 @@
 
             pass.SetPipeline(pipeline);
 
-            // Encode a heap worth of descriptors plus one more.
-            for (uint32_t i = 0; i < heapSize + 1; ++i) {
-                pass.SetBindGroup(0, bindGroups[i]);
-                pass.Draw(3);
-            }
-
-            // Re-encode the first bindgroup again.
             pass.SetBindGroup(0, bindGroups[0]);
             pass.Draw(3);
 
@@ -660,137 +732,55 @@
 
         wgpu::CommandBuffer commands = encoder.Finish();
         queue.Submit(1, &commands);
-
-        // Make sure the first bindgroup was encoded correctly.
-        EXPECT_PIXEL_RGBA8_EQ(RGBA8::kRed, renderPass.color, 0, 0);
     }
 
-    // Verify encoding a heaps worth of bindgroups plus one more in the first submit then reuse the
-    // first bindgroup again in the second submit.
-    // Shader-visible heaps should switch out once then re-encode the
-    // first descriptor at the same offset in the heap.
-    TEST_P(D3D12DescriptorHeapTests, EncodeReuseUBOMultipleSubmits) {
-        DAWN_TEST_UNSUPPORTED_IF(!mD3DDevice->IsToggleEnabled(
-            dawn::native::Toggle::UseD3D12SmallShaderVisibleHeapForTesting));
+    // Make sure the first bindgroup was re-encoded correctly.
+    EXPECT_PIXEL_RGBA8_EQ(RGBA8::kGreen, renderPass.color, 0, 0);
+}
 
-        utils::BasicRenderPass renderPass = utils::CreateBasicRenderPass(device, kRTSize, kRTSize);
+// Verify encoding many sampler and ubo worth of bindgroups.
+// Shader-visible heaps should switch out |kNumOfViewHeaps| times.
+TEST_P(D3D12DescriptorHeapTests, EncodeManyUBOAndSamplers) {
+    DAWN_TEST_UNSUPPORTED_IF(!mD3DDevice->IsToggleEnabled(
+        dawn::native::Toggle::UseD3D12SmallShaderVisibleHeapForTesting));
 
+    // Create a solid filled texture.
+    wgpu::TextureDescriptor descriptor;
+    descriptor.dimension = wgpu::TextureDimension::e2D;
+    descriptor.size.width = kRTSize;
+    descriptor.size.height = kRTSize;
+    descriptor.size.depthOrArrayLayers = 1;
+    descriptor.sampleCount = 1;
+    descriptor.format = wgpu::TextureFormat::RGBA8Unorm;
+    descriptor.mipLevelCount = 1;
+    descriptor.usage = wgpu::TextureUsage::TextureBinding | wgpu::TextureUsage::RenderAttachment |
+                       wgpu::TextureUsage::CopySrc;
+    wgpu::Texture texture = device.CreateTexture(&descriptor);
+    wgpu::TextureView textureView = texture.CreateView();
+
+    {
+        utils::BasicRenderPass renderPass = utils::BasicRenderPass(kRTSize, kRTSize, texture);
+
+        utils::ComboRenderPassDescriptor renderPassDesc({textureView});
+        renderPassDesc.cColorAttachments[0].loadOp = wgpu::LoadOp::Clear;
+        renderPassDesc.cColorAttachments[0].clearValue = {0.0f, 1.0f, 0.0f, 1.0f};
+        renderPass.renderPassInfo.cColorAttachments[0].view = textureView;
+
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        auto pass = encoder.BeginRenderPass(&renderPassDesc);
+        pass.End();
+
+        wgpu::CommandBuffer commandBuffer = encoder.Finish();
+        queue.Submit(1, &commandBuffer);
+
+        RGBA8 filled(0, 255, 0, 255);
+        EXPECT_PIXEL_RGBA8_EQ(filled, renderPass.color, 0, 0);
+    }
+
+    {
         utils::ComboRenderPipelineDescriptor pipelineDescriptor;
-        pipelineDescriptor.vertex.module = mSimpleVSModule;
-        pipelineDescriptor.cFragment.module = mSimpleFSModule;
-        pipelineDescriptor.cTargets[0].format = renderPass.colorFormat;
 
-        wgpu::RenderPipeline pipeline = device.CreateRenderPipeline(&pipelineDescriptor);
-
-        // Encode heap worth of descriptors plus one more.
-        std::array<float, 4> redColor = {1, 0, 0, 1};
-
-        wgpu::Buffer firstUniformBuffer = utils::CreateBufferFromData(
-            device, &redColor, sizeof(redColor), wgpu::BufferUsage::Uniform);
-
-        std::vector<wgpu::BindGroup> bindGroups = {
-            utils::MakeBindGroup(device, pipeline.GetBindGroupLayout(0),
-                                 {{0, firstUniformBuffer, 0, sizeof(redColor)}})};
-
-        const uint32_t heapSize = mD3DDevice->GetViewShaderVisibleDescriptorAllocator()
-                                      ->GetShaderVisibleHeapSizeForTesting();
-
-        for (uint32_t i = 0; i < heapSize; i++) {
-            std::array<float, 4> fillColor = GetSolidColor(i + 1);  // Avoid black
-            wgpu::Buffer uniformBuffer = utils::CreateBufferFromData(
-                device, &fillColor, sizeof(fillColor), wgpu::BufferUsage::Uniform);
-
-            bindGroups.push_back(utils::MakeBindGroup(device, pipeline.GetBindGroupLayout(0),
-                                                      {{0, uniformBuffer, 0, sizeof(fillColor)}}));
-        }
-
-        {
-            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-            {
-                wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass.renderPassInfo);
-
-                pass.SetPipeline(pipeline);
-
-                for (uint32_t i = 0; i < heapSize + 1; ++i) {
-                    pass.SetBindGroup(0, bindGroups[i]);
-                    pass.Draw(3);
-                }
-
-                pass.End();
-            }
-
-            wgpu::CommandBuffer commands = encoder.Finish();
-            queue.Submit(1, &commands);
-        }
-
-        // Re-encode the first bindgroup again.
-        {
-            std::array<float, 4> greenColor = {0, 1, 0, 1};
-            queue.WriteBuffer(firstUniformBuffer, 0, &greenColor, sizeof(greenColor));
-
-            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-            {
-                wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass.renderPassInfo);
-
-                pass.SetPipeline(pipeline);
-
-                pass.SetBindGroup(0, bindGroups[0]);
-                pass.Draw(3);
-
-                pass.End();
-            }
-
-            wgpu::CommandBuffer commands = encoder.Finish();
-            queue.Submit(1, &commands);
-        }
-
-        // Make sure the first bindgroup was re-encoded correctly.
-        EXPECT_PIXEL_RGBA8_EQ(RGBA8::kGreen, renderPass.color, 0, 0);
-    }
-
-    // Verify encoding many sampler and ubo worth of bindgroups.
-    // Shader-visible heaps should switch out |kNumOfViewHeaps| times.
-    TEST_P(D3D12DescriptorHeapTests, EncodeManyUBOAndSamplers) {
-        DAWN_TEST_UNSUPPORTED_IF(!mD3DDevice->IsToggleEnabled(
-            dawn::native::Toggle::UseD3D12SmallShaderVisibleHeapForTesting));
-
-        // Create a solid filled texture.
-        wgpu::TextureDescriptor descriptor;
-        descriptor.dimension = wgpu::TextureDimension::e2D;
-        descriptor.size.width = kRTSize;
-        descriptor.size.height = kRTSize;
-        descriptor.size.depthOrArrayLayers = 1;
-        descriptor.sampleCount = 1;
-        descriptor.format = wgpu::TextureFormat::RGBA8Unorm;
-        descriptor.mipLevelCount = 1;
-        descriptor.usage = wgpu::TextureUsage::TextureBinding |
-                           wgpu::TextureUsage::RenderAttachment | wgpu::TextureUsage::CopySrc;
-        wgpu::Texture texture = device.CreateTexture(&descriptor);
-        wgpu::TextureView textureView = texture.CreateView();
-
-        {
-            utils::BasicRenderPass renderPass = utils::BasicRenderPass(kRTSize, kRTSize, texture);
-
-            utils::ComboRenderPassDescriptor renderPassDesc({textureView});
-            renderPassDesc.cColorAttachments[0].loadOp = wgpu::LoadOp::Clear;
-            renderPassDesc.cColorAttachments[0].clearValue = {0.0f, 1.0f, 0.0f, 1.0f};
-            renderPass.renderPassInfo.cColorAttachments[0].view = textureView;
-
-            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-            auto pass = encoder.BeginRenderPass(&renderPassDesc);
-            pass.End();
-
-            wgpu::CommandBuffer commandBuffer = encoder.Finish();
-            queue.Submit(1, &commandBuffer);
-
-            RGBA8 filled(0, 255, 0, 255);
-            EXPECT_PIXEL_RGBA8_EQ(filled, renderPass.color, 0, 0);
-        }
-
-        {
-            utils::ComboRenderPipelineDescriptor pipelineDescriptor;
-
-            pipelineDescriptor.vertex.module = utils::CreateShaderModule(device, R"(
+        pipelineDescriptor.vertex.module = utils::CreateShaderModule(device, R"(
             struct U {
                 transform : mat2x2<f32>
             }
@@ -806,7 +796,7 @@
                 );
                 return vec4<f32>(buffer0.transform * (pos[VertexIndex]), 0.0, 1.0);
             })");
-            pipelineDescriptor.cFragment.module = utils::CreateShaderModule(device, R"(
+        pipelineDescriptor.cFragment.module = utils::CreateShaderModule(device, R"(
             struct U {
                 color : vec4<f32>
             }
@@ -820,251 +810,247 @@
                 return textureSample(texture0, sampler0, FragCoord.xy) + buffer0.color;
             })");
 
-            utils::BasicRenderPass renderPass =
-                utils::CreateBasicRenderPass(device, kRTSize, kRTSize);
-            pipelineDescriptor.cTargets[0].format = renderPass.colorFormat;
+        utils::BasicRenderPass renderPass = utils::CreateBasicRenderPass(device, kRTSize, kRTSize);
+        pipelineDescriptor.cTargets[0].format = renderPass.colorFormat;
 
-            wgpu::RenderPipeline pipeline = device.CreateRenderPipeline(&pipelineDescriptor);
+        wgpu::RenderPipeline pipeline = device.CreateRenderPipeline(&pipelineDescriptor);
 
-            // Encode a heap worth of descriptors |kNumOfHeaps| times.
-            constexpr float transform[] = {1.f, 0.f, 0.f, 1.f};
-            wgpu::Buffer transformBuffer = utils::CreateBufferFromData(
-                device, &transform, sizeof(transform), wgpu::BufferUsage::Uniform);
+        // Encode a heap worth of descriptors |kNumOfHeaps| times.
+        constexpr float transform[] = {1.f, 0.f, 0.f, 1.f};
+        wgpu::Buffer transformBuffer = utils::CreateBufferFromData(
+            device, &transform, sizeof(transform), wgpu::BufferUsage::Uniform);
 
-            wgpu::SamplerDescriptor samplerDescriptor;
-            wgpu::Sampler sampler = device.CreateSampler(&samplerDescriptor);
+        wgpu::SamplerDescriptor samplerDescriptor;
+        wgpu::Sampler sampler = device.CreateSampler(&samplerDescriptor);
 
-            ShaderVisibleDescriptorAllocator* viewAllocator =
-                mD3DDevice->GetViewShaderVisibleDescriptorAllocator();
+        ShaderVisibleDescriptorAllocator* viewAllocator =
+            mD3DDevice->GetViewShaderVisibleDescriptorAllocator();
 
-            ShaderVisibleDescriptorAllocator* samplerAllocator =
-                mD3DDevice->GetSamplerShaderVisibleDescriptorAllocator();
+        ShaderVisibleDescriptorAllocator* samplerAllocator =
+            mD3DDevice->GetSamplerShaderVisibleDescriptorAllocator();
 
-            const HeapVersionID viewHeapSerial =
-                viewAllocator->GetShaderVisibleHeapSerialForTesting();
-            const HeapVersionID samplerHeapSerial =
-                samplerAllocator->GetShaderVisibleHeapSerialForTesting();
+        const HeapVersionID viewHeapSerial = viewAllocator->GetShaderVisibleHeapSerialForTesting();
+        const HeapVersionID samplerHeapSerial =
+            samplerAllocator->GetShaderVisibleHeapSerialForTesting();
 
-            const uint32_t viewHeapSize = viewAllocator->GetShaderVisibleHeapSizeForTesting();
+        const uint32_t viewHeapSize = viewAllocator->GetShaderVisibleHeapSizeForTesting();
 
-            // "Small" view heap is always 2 x sampler heap size and encodes 3x the descriptors per
-            // group. This means the count of heaps switches is determined by the total number of
-            // views to encode. Compute the number of bindgroups to encode by counting the required
-            // views for |kNumOfViewHeaps| heaps worth.
-            constexpr uint32_t kViewsPerBindGroup = 3;
-            constexpr uint32_t kNumOfViewHeaps = 5;
+        // "Small" view heap is always 2 x sampler heap size and encodes 3x the descriptors per
+        // group. This means the count of heaps switches is determined by the total number of
+        // views to encode. Compute the number of bindgroups to encode by counting the required
+        // views for |kNumOfViewHeaps| heaps worth.
+        constexpr uint32_t kViewsPerBindGroup = 3;
+        constexpr uint32_t kNumOfViewHeaps = 5;
 
-            const uint32_t numOfEncodedBindGroups =
-                (viewHeapSize * kNumOfViewHeaps) / kViewsPerBindGroup;
+        const uint32_t numOfEncodedBindGroups =
+            (viewHeapSize * kNumOfViewHeaps) / kViewsPerBindGroup;
 
-            std::vector<wgpu::BindGroup> bindGroups;
-            for (uint32_t i = 0; i < numOfEncodedBindGroups - 1; i++) {
-                std::array<float, 4> fillColor = GetSolidColor(i + 1);  // Avoid black
-                wgpu::Buffer uniformBuffer = utils::CreateBufferFromData(
-                    device, &fillColor, sizeof(fillColor), wgpu::BufferUsage::Uniform);
+        std::vector<wgpu::BindGroup> bindGroups;
+        for (uint32_t i = 0; i < numOfEncodedBindGroups - 1; i++) {
+            std::array<float, 4> fillColor = GetSolidColor(i + 1);  // Avoid black
+            wgpu::Buffer uniformBuffer = utils::CreateBufferFromData(
+                device, &fillColor, sizeof(fillColor), wgpu::BufferUsage::Uniform);
 
-                bindGroups.push_back(
-                    utils::MakeBindGroup(device, pipeline.GetBindGroupLayout(0),
-                                         {{0, transformBuffer, 0, sizeof(transform)},
-                                          {1, sampler},
-                                          {2, textureView},
-                                          {3, uniformBuffer, 0, sizeof(fillColor)}}));
-            }
-
-            std::array<float, 4> redColor = {1, 0, 0, 1};
-            wgpu::Buffer lastUniformBuffer = utils::CreateBufferFromData(
-                device, &redColor, sizeof(redColor), wgpu::BufferUsage::Uniform);
-
-            bindGroups.push_back(
-                utils::MakeBindGroup(device, pipeline.GetBindGroupLayout(0),
-                                     {{0, transformBuffer, 0, sizeof(transform)},
-                                      {1, sampler},
-                                      {2, textureView},
-                                      {3, lastUniformBuffer, 0, sizeof(redColor)}}));
-
-            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-            wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass.renderPassInfo);
-
-            pass.SetPipeline(pipeline);
-
-            for (uint32_t i = 0; i < numOfEncodedBindGroups; ++i) {
-                pass.SetBindGroup(0, bindGroups[i]);
-                pass.Draw(3);
-            }
-
-            pass.End();
-
-            wgpu::CommandBuffer commands = encoder.Finish();
-            queue.Submit(1, &commands);
-
-            // Final accumulated color is result of sampled + UBO color.
-            RGBA8 filled(255, 255, 0, 255);
-            RGBA8 notFilled(0, 0, 0, 0);
-            EXPECT_PIXEL_RGBA8_EQ(filled, renderPass.color, 0, 0);
-            EXPECT_PIXEL_RGBA8_EQ(notFilled, renderPass.color, kRTSize - 1, 0);
-
-            EXPECT_EQ(viewAllocator->GetShaderVisiblePoolSizeForTesting(), kNumOfViewHeaps);
-            EXPECT_EQ(viewAllocator->GetShaderVisibleHeapSerialForTesting(),
-                      viewHeapSerial + HeapVersionID(kNumOfViewHeaps));
-
-            EXPECT_EQ(samplerAllocator->GetShaderVisiblePoolSizeForTesting(), 0u);
-            EXPECT_EQ(samplerAllocator->GetShaderVisibleHeapSerialForTesting(), samplerHeapSerial);
+            bindGroups.push_back(utils::MakeBindGroup(device, pipeline.GetBindGroupLayout(0),
+                                                      {{0, transformBuffer, 0, sizeof(transform)},
+                                                       {1, sampler},
+                                                       {2, textureView},
+                                                       {3, uniformBuffer, 0, sizeof(fillColor)}}));
         }
+
+        std::array<float, 4> redColor = {1, 0, 0, 1};
+        wgpu::Buffer lastUniformBuffer = utils::CreateBufferFromData(
+            device, &redColor, sizeof(redColor), wgpu::BufferUsage::Uniform);
+
+        bindGroups.push_back(utils::MakeBindGroup(device, pipeline.GetBindGroupLayout(0),
+                                                  {{0, transformBuffer, 0, sizeof(transform)},
+                                                   {1, sampler},
+                                                   {2, textureView},
+                                                   {3, lastUniformBuffer, 0, sizeof(redColor)}}));
+
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass.renderPassInfo);
+
+        pass.SetPipeline(pipeline);
+
+        for (uint32_t i = 0; i < numOfEncodedBindGroups; ++i) {
+            pass.SetBindGroup(0, bindGroups[i]);
+            pass.Draw(3);
+        }
+
+        pass.End();
+
+        wgpu::CommandBuffer commands = encoder.Finish();
+        queue.Submit(1, &commands);
+
+        // Final accumulated color is result of sampled + UBO color.
+        RGBA8 filled(255, 255, 0, 255);
+        RGBA8 notFilled(0, 0, 0, 0);
+        EXPECT_PIXEL_RGBA8_EQ(filled, renderPass.color, 0, 0);
+        EXPECT_PIXEL_RGBA8_EQ(notFilled, renderPass.color, kRTSize - 1, 0);
+
+        EXPECT_EQ(viewAllocator->GetShaderVisiblePoolSizeForTesting(), kNumOfViewHeaps);
+        EXPECT_EQ(viewAllocator->GetShaderVisibleHeapSerialForTesting(),
+                  viewHeapSerial + HeapVersionID(kNumOfViewHeaps));
+
+        EXPECT_EQ(samplerAllocator->GetShaderVisiblePoolSizeForTesting(), 0u);
+        EXPECT_EQ(samplerAllocator->GetShaderVisibleHeapSerialForTesting(), samplerHeapSerial);
+    }
+}
+
+// Verify a single allocate/deallocate.
+// One non-shader visible heap will be created.
+TEST_P(D3D12DescriptorHeapTests, Single) {
+    constexpr uint32_t kDescriptorCount = 4;
+    constexpr uint32_t kAllocationsPerHeap = 3;
+    PlaceholderStagingDescriptorAllocator allocator(mD3DDevice, kDescriptorCount,
+                                                    kAllocationsPerHeap);
+
+    CPUDescriptorHeapAllocation allocation = allocator.AllocateCPUDescriptors();
+    EXPECT_EQ(allocation.GetHeapIndex(), 0u);
+    EXPECT_NE(allocation.OffsetFrom(0, 0).ptr, 0u);
+
+    allocator.Deallocate(allocation);
+    EXPECT_FALSE(allocation.IsValid());
+}
+
+// Verify allocating many times causes the pool to increase in size.
+// Creates |kNumOfHeaps| non-shader visible heaps.
+TEST_P(D3D12DescriptorHeapTests, Sequential) {
+    constexpr uint32_t kDescriptorCount = 4;
+    constexpr uint32_t kAllocationsPerHeap = 3;
+    PlaceholderStagingDescriptorAllocator allocator(mD3DDevice, kDescriptorCount,
+                                                    kAllocationsPerHeap);
+
+    // Allocate |kNumOfHeaps| worth.
+    constexpr uint32_t kNumOfHeaps = 2;
+
+    std::set<uint32_t> allocatedHeaps;
+
+    std::vector<CPUDescriptorHeapAllocation> allocations;
+    for (uint32_t i = 0; i < kAllocationsPerHeap * kNumOfHeaps; i++) {
+        CPUDescriptorHeapAllocation allocation = allocator.AllocateCPUDescriptors();
+        EXPECT_EQ(allocation.GetHeapIndex(), i / kAllocationsPerHeap);
+        EXPECT_NE(allocation.OffsetFrom(0, 0).ptr, 0u);
+        allocations.push_back(allocation);
+        allocatedHeaps.insert(allocation.GetHeapIndex());
     }
 
-    // Verify a single allocate/deallocate.
-    // One non-shader visible heap will be created.
-    TEST_P(D3D12DescriptorHeapTests, Single) {
-        constexpr uint32_t kDescriptorCount = 4;
-        constexpr uint32_t kAllocationsPerHeap = 3;
-        PlaceholderStagingDescriptorAllocator allocator(mD3DDevice, kDescriptorCount,
-                                                        kAllocationsPerHeap);
+    EXPECT_EQ(allocatedHeaps.size(), kNumOfHeaps);
 
+    // Deallocate all.
+    for (CPUDescriptorHeapAllocation& allocation : allocations) {
+        allocator.Deallocate(allocation);
+        EXPECT_FALSE(allocation.IsValid());
+    }
+}
+
+// Verify that re-allocating a number of allocations < pool size, all heaps are reused.
+// Creates and reuses |kNumofHeaps| non-shader visible heaps.
+TEST_P(D3D12DescriptorHeapTests, ReuseFreedHeaps) {
+    constexpr uint32_t kDescriptorCount = 4;
+    constexpr uint32_t kAllocationsPerHeap = 25;
+    PlaceholderStagingDescriptorAllocator allocator(mD3DDevice, kDescriptorCount,
+                                                    kAllocationsPerHeap);
+
+    constexpr uint32_t kNumofHeaps = 10;
+
+    std::list<CPUDescriptorHeapAllocation> allocations;
+    std::set<size_t> allocationPtrs;
+
+    // Allocate |kNumofHeaps| heaps worth.
+    for (uint32_t i = 0; i < kAllocationsPerHeap * kNumofHeaps; i++) {
         CPUDescriptorHeapAllocation allocation = allocator.AllocateCPUDescriptors();
-        EXPECT_EQ(allocation.GetHeapIndex(), 0u);
-        EXPECT_NE(allocation.OffsetFrom(0, 0).ptr, 0u);
+        allocations.push_back(allocation);
+        EXPECT_TRUE(allocationPtrs.insert(allocation.OffsetFrom(0, 0).ptr).second);
+    }
 
+    // Deallocate all.
+    for (CPUDescriptorHeapAllocation& allocation : allocations) {
         allocator.Deallocate(allocation);
         EXPECT_FALSE(allocation.IsValid());
     }
 
-    // Verify allocating many times causes the pool to increase in size.
-    // Creates |kNumOfHeaps| non-shader visible heaps.
-    TEST_P(D3D12DescriptorHeapTests, Sequential) {
-        constexpr uint32_t kDescriptorCount = 4;
-        constexpr uint32_t kAllocationsPerHeap = 3;
-        PlaceholderStagingDescriptorAllocator allocator(mD3DDevice, kDescriptorCount,
-                                                        kAllocationsPerHeap);
+    allocations.clear();
 
-        // Allocate |kNumOfHeaps| worth.
-        constexpr uint32_t kNumOfHeaps = 2;
+    // Re-allocate all again.
+    std::set<size_t> reallocatedPtrs;
+    for (uint32_t i = 0; i < kAllocationsPerHeap * kNumofHeaps; i++) {
+        CPUDescriptorHeapAllocation allocation = allocator.AllocateCPUDescriptors();
+        allocations.push_back(allocation);
+        EXPECT_TRUE(reallocatedPtrs.insert(allocation.OffsetFrom(0, 0).ptr).second);
+        EXPECT_TRUE(std::find(allocationPtrs.begin(), allocationPtrs.end(),
+                              allocation.OffsetFrom(0, 0).ptr) != allocationPtrs.end());
+    }
 
-        std::set<uint32_t> allocatedHeaps;
+    // Deallocate all again.
+    for (CPUDescriptorHeapAllocation& allocation : allocations) {
+        allocator.Deallocate(allocation);
+        EXPECT_FALSE(allocation.IsValid());
+    }
+}
 
-        std::vector<CPUDescriptorHeapAllocation> allocations;
-        for (uint32_t i = 0; i < kAllocationsPerHeap * kNumOfHeaps; i++) {
-            CPUDescriptorHeapAllocation allocation = allocator.AllocateCPUDescriptors();
-            EXPECT_EQ(allocation.GetHeapIndex(), i / kAllocationsPerHeap);
-            EXPECT_NE(allocation.OffsetFrom(0, 0).ptr, 0u);
+// Verify allocating then deallocating many times.
+TEST_P(D3D12DescriptorHeapTests, AllocateDeallocateMany) {
+    constexpr uint32_t kDescriptorCount = 4;
+    constexpr uint32_t kAllocationsPerHeap = 25;
+    PlaceholderStagingDescriptorAllocator allocator(mD3DDevice, kDescriptorCount,
+                                                    kAllocationsPerHeap);
+
+    std::list<CPUDescriptorHeapAllocation> list3;
+    std::list<CPUDescriptorHeapAllocation> list5;
+    std::list<CPUDescriptorHeapAllocation> allocations;
+
+    constexpr uint32_t kNumofHeaps = 2;
+
+    // Allocate |kNumofHeaps| heaps worth.
+    for (uint32_t i = 0; i < kAllocationsPerHeap * kNumofHeaps; i++) {
+        CPUDescriptorHeapAllocation allocation = allocator.AllocateCPUDescriptors();
+        EXPECT_NE(allocation.OffsetFrom(0, 0).ptr, 0u);
+        if (i % 3 == 0) {
+            list3.push_back(allocation);
+        } else {
             allocations.push_back(allocation);
-            allocatedHeaps.insert(allocation.GetHeapIndex());
-        }
-
-        EXPECT_EQ(allocatedHeaps.size(), kNumOfHeaps);
-
-        // Deallocate all.
-        for (CPUDescriptorHeapAllocation& allocation : allocations) {
-            allocator.Deallocate(allocation);
-            EXPECT_FALSE(allocation.IsValid());
         }
     }
 
-    // Verify that re-allocating a number of allocations < pool size, all heaps are reused.
-    // Creates and reuses |kNumofHeaps| non-shader visible heaps.
-    TEST_P(D3D12DescriptorHeapTests, ReuseFreedHeaps) {
-        constexpr uint32_t kDescriptorCount = 4;
-        constexpr uint32_t kAllocationsPerHeap = 25;
-        PlaceholderStagingDescriptorAllocator allocator(mD3DDevice, kDescriptorCount,
-                                                        kAllocationsPerHeap);
+    // Deallocate every 3rd allocation.
+    for (auto it = list3.begin(); it != list3.end(); it = list3.erase(it)) {
+        allocator.Deallocate(*it);
+    }
 
-        constexpr uint32_t kNumofHeaps = 10;
-
-        std::list<CPUDescriptorHeapAllocation> allocations;
-        std::set<size_t> allocationPtrs;
-
-        // Allocate |kNumofHeaps| heaps worth.
-        for (uint32_t i = 0; i < kAllocationsPerHeap * kNumofHeaps; i++) {
-            CPUDescriptorHeapAllocation allocation = allocator.AllocateCPUDescriptors();
+    // Allocate again.
+    for (uint32_t i = 0; i < kAllocationsPerHeap * kNumofHeaps; i++) {
+        CPUDescriptorHeapAllocation allocation = allocator.AllocateCPUDescriptors();
+        EXPECT_NE(allocation.OffsetFrom(0, 0).ptr, 0u);
+        if (i % 5 == 0) {
+            list5.push_back(allocation);
+        } else {
             allocations.push_back(allocation);
-            EXPECT_TRUE(allocationPtrs.insert(allocation.OffsetFrom(0, 0).ptr).second);
-        }
-
-        // Deallocate all.
-        for (CPUDescriptorHeapAllocation& allocation : allocations) {
-            allocator.Deallocate(allocation);
-            EXPECT_FALSE(allocation.IsValid());
-        }
-
-        allocations.clear();
-
-        // Re-allocate all again.
-        std::set<size_t> reallocatedPtrs;
-        for (uint32_t i = 0; i < kAllocationsPerHeap * kNumofHeaps; i++) {
-            CPUDescriptorHeapAllocation allocation = allocator.AllocateCPUDescriptors();
-            allocations.push_back(allocation);
-            EXPECT_TRUE(reallocatedPtrs.insert(allocation.OffsetFrom(0, 0).ptr).second);
-            EXPECT_TRUE(std::find(allocationPtrs.begin(), allocationPtrs.end(),
-                                  allocation.OffsetFrom(0, 0).ptr) != allocationPtrs.end());
-        }
-
-        // Deallocate all again.
-        for (CPUDescriptorHeapAllocation& allocation : allocations) {
-            allocator.Deallocate(allocation);
-            EXPECT_FALSE(allocation.IsValid());
         }
     }
 
-    // Verify allocating then deallocating many times.
-    TEST_P(D3D12DescriptorHeapTests, AllocateDeallocateMany) {
-        constexpr uint32_t kDescriptorCount = 4;
-        constexpr uint32_t kAllocationsPerHeap = 25;
-        PlaceholderStagingDescriptorAllocator allocator(mD3DDevice, kDescriptorCount,
-                                                        kAllocationsPerHeap);
-
-        std::list<CPUDescriptorHeapAllocation> list3;
-        std::list<CPUDescriptorHeapAllocation> list5;
-        std::list<CPUDescriptorHeapAllocation> allocations;
-
-        constexpr uint32_t kNumofHeaps = 2;
-
-        // Allocate |kNumofHeaps| heaps worth.
-        for (uint32_t i = 0; i < kAllocationsPerHeap * kNumofHeaps; i++) {
-            CPUDescriptorHeapAllocation allocation = allocator.AllocateCPUDescriptors();
-            EXPECT_NE(allocation.OffsetFrom(0, 0).ptr, 0u);
-            if (i % 3 == 0) {
-                list3.push_back(allocation);
-            } else {
-                allocations.push_back(allocation);
-            }
-        }
-
-        // Deallocate every 3rd allocation.
-        for (auto it = list3.begin(); it != list3.end(); it = list3.erase(it)) {
-            allocator.Deallocate(*it);
-        }
-
-        // Allocate again.
-        for (uint32_t i = 0; i < kAllocationsPerHeap * kNumofHeaps; i++) {
-            CPUDescriptorHeapAllocation allocation = allocator.AllocateCPUDescriptors();
-            EXPECT_NE(allocation.OffsetFrom(0, 0).ptr, 0u);
-            if (i % 5 == 0) {
-                list5.push_back(allocation);
-            } else {
-                allocations.push_back(allocation);
-            }
-        }
-
-        // Deallocate every 5th allocation.
-        for (auto it = list5.begin(); it != list5.end(); it = list5.erase(it)) {
-            allocator.Deallocate(*it);
-        }
-
-        // Allocate again.
-        for (uint32_t i = 0; i < kAllocationsPerHeap * kNumofHeaps; i++) {
-            CPUDescriptorHeapAllocation allocation = allocator.AllocateCPUDescriptors();
-            EXPECT_NE(allocation.OffsetFrom(0, 0).ptr, 0u);
-            allocations.push_back(allocation);
-        }
-
-        // Deallocate remaining.
-        for (CPUDescriptorHeapAllocation& allocation : allocations) {
-            allocator.Deallocate(allocation);
-            EXPECT_FALSE(allocation.IsValid());
-        }
+    // Deallocate every 5th allocation.
+    for (auto it = list5.begin(); it != list5.end(); it = list5.erase(it)) {
+        allocator.Deallocate(*it);
     }
 
-    DAWN_INSTANTIATE_TEST(D3D12DescriptorHeapTests,
-                          D3D12Backend(),
-                          D3D12Backend({"use_d3d12_small_shader_visible_heap"}));
+    // Allocate again.
+    for (uint32_t i = 0; i < kAllocationsPerHeap * kNumofHeaps; i++) {
+        CPUDescriptorHeapAllocation allocation = allocator.AllocateCPUDescriptors();
+        EXPECT_NE(allocation.OffsetFrom(0, 0).ptr, 0u);
+        allocations.push_back(allocation);
+    }
+
+    // Deallocate remaining.
+    for (CPUDescriptorHeapAllocation& allocation : allocations) {
+        allocator.Deallocate(allocation);
+        EXPECT_FALSE(allocation.IsValid());
+    }
+}
+
+DAWN_INSTANTIATE_TEST(D3D12DescriptorHeapTests,
+                      D3D12Backend(),
+                      D3D12Backend({"use_d3d12_small_shader_visible_heap"}));
 
 }  // namespace dawn::native::d3d12
diff --git a/src/dawn/tests/white_box/D3D12GPUTimestampCalibrationTests.cpp b/src/dawn/tests/white_box/D3D12GPUTimestampCalibrationTests.cpp
index b87d564..2bad627 100644
--- a/src/dawn/tests/white_box/D3D12GPUTimestampCalibrationTests.cpp
+++ b/src/dawn/tests/white_box/D3D12GPUTimestampCalibrationTests.cpp
@@ -21,100 +21,100 @@
 #include "dawn/utils/WGPUHelpers.h"
 
 namespace dawn::native::d3d12 {
-    namespace {
-        class ExpectBetweenTimestamps : public ::detail::Expectation {
-          public:
-            ~ExpectBetweenTimestamps() override = default;
+namespace {
+class ExpectBetweenTimestamps : public ::detail::Expectation {
+  public:
+    ~ExpectBetweenTimestamps() override = default;
 
-            ExpectBetweenTimestamps(uint64_t value0, uint64_t value1) {
-                mValue0 = value0;
-                mValue1 = value1;
-            }
-
-            // Expect the actual results are between mValue0 and mValue1.
-            testing::AssertionResult Check(const void* data, size_t size) override {
-                const uint64_t* actual = static_cast<const uint64_t*>(data);
-                for (size_t i = 0; i < size / sizeof(uint64_t); ++i) {
-                    if (actual[i] < mValue0 || actual[i] > mValue1) {
-                        return testing::AssertionFailure()
-                               << "Expected data[" << i << "] to be between " << mValue0 << " and "
-                               << mValue1 << ", actual " << actual[i] << std::endl;
-                    }
-                }
-
-                return testing::AssertionSuccess();
-            }
-
-          private:
-            uint64_t mValue0;
-            uint64_t mValue1;
-        };
-
-    }  // anonymous namespace
-
-    class D3D12GPUTimestampCalibrationTests : public DawnTest {
-      protected:
-        void SetUp() override {
-            DawnTest::SetUp();
-
-            DAWN_TEST_UNSUPPORTED_IF(UsesWire());
-            // Requires that timestamp query feature is enabled and timestamp query conversion is
-            // disabled.
-            DAWN_TEST_UNSUPPORTED_IF(!SupportsFeatures({wgpu::FeatureName::TimestampQuery}) ||
-                                     !HasToggleEnabled("disable_timestamp_query_conversion"));
-        }
-
-        std::vector<wgpu::FeatureName> GetRequiredFeatures() override {
-            std::vector<wgpu::FeatureName> requiredFeatures = {};
-            if (SupportsFeatures({wgpu::FeatureName::TimestampQuery})) {
-                requiredFeatures.push_back(wgpu::FeatureName::TimestampQuery);
-            }
-            return requiredFeatures;
-        }
-    };
-
-    // Check that the timestamps got by timestamp query are between the two timestamps from
-    // GetClockCalibration() after the timestamp conversion is disabled.
-    TEST_P(D3D12GPUTimestampCalibrationTests, TimestampsInOrder) {
-        constexpr uint32_t kQueryCount = 2;
-
-        wgpu::QuerySetDescriptor querySetDescriptor;
-        querySetDescriptor.count = kQueryCount;
-        querySetDescriptor.type = wgpu::QueryType::Timestamp;
-        wgpu::QuerySet querySet = device.CreateQuerySet(&querySetDescriptor);
-
-        wgpu::BufferDescriptor bufferDescriptor;
-        bufferDescriptor.size = kQueryCount * sizeof(uint64_t);
-        bufferDescriptor.usage = wgpu::BufferUsage::QueryResolve | wgpu::BufferUsage::CopySrc |
-                                 wgpu::BufferUsage::CopyDst;
-        wgpu::Buffer destination = device.CreateBuffer(&bufferDescriptor);
-
-        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-        encoder.WriteTimestamp(querySet, 0);
-        encoder.WriteTimestamp(querySet, 1);
-        wgpu::CommandBuffer commands = encoder.Finish();
-
-        Device* d3DDevice = reinterpret_cast<Device*>(device.Get());
-        uint64_t gpuTimestamp0, gpuTimestamp1;
-        uint64_t cpuTimestamp0, cpuTimestamp1;
-        d3DDevice->GetCommandQueue()->GetClockCalibration(&gpuTimestamp0, &cpuTimestamp0);
-        queue.Submit(1, &commands);
-        WaitForAllOperations();
-        d3DDevice->GetCommandQueue()->GetClockCalibration(&gpuTimestamp1, &cpuTimestamp1);
-
-        // Separate resolve queryset to reduce the execution time of the queue with WriteTimestamp,
-        // so that the timestamp in the querySet will be closer to both gpuTimestamps from
-        // GetClockCalibration.
-        wgpu::CommandEncoder resolveEncoder = device.CreateCommandEncoder();
-        resolveEncoder.ResolveQuerySet(querySet, 0, kQueryCount, destination, 0);
-        wgpu::CommandBuffer resolveCommands = resolveEncoder.Finish();
-        queue.Submit(1, &resolveCommands);
-
-        EXPECT_BUFFER(destination, 0, kQueryCount * sizeof(uint64_t),
-                      new ExpectBetweenTimestamps(gpuTimestamp0, gpuTimestamp1));
+    ExpectBetweenTimestamps(uint64_t value0, uint64_t value1) {
+        mValue0 = value0;
+        mValue1 = value1;
     }
 
-    DAWN_INSTANTIATE_TEST(D3D12GPUTimestampCalibrationTests,
-                          D3D12Backend({"disable_timestamp_query_conversion"}));
+    // Expect the actual results are between mValue0 and mValue1.
+    testing::AssertionResult Check(const void* data, size_t size) override {
+        const uint64_t* actual = static_cast<const uint64_t*>(data);
+        for (size_t i = 0; i < size / sizeof(uint64_t); ++i) {
+            if (actual[i] < mValue0 || actual[i] > mValue1) {
+                return testing::AssertionFailure()
+                       << "Expected data[" << i << "] to be between " << mValue0 << " and "
+                       << mValue1 << ", actual " << actual[i] << std::endl;
+            }
+        }
+
+        return testing::AssertionSuccess();
+    }
+
+  private:
+    uint64_t mValue0;
+    uint64_t mValue1;
+};
+
+}  // anonymous namespace
+
+class D3D12GPUTimestampCalibrationTests : public DawnTest {
+  protected:
+    void SetUp() override {
+        DawnTest::SetUp();
+
+        DAWN_TEST_UNSUPPORTED_IF(UsesWire());
+        // Requires that timestamp query feature is enabled and timestamp query conversion is
+        // disabled.
+        DAWN_TEST_UNSUPPORTED_IF(!SupportsFeatures({wgpu::FeatureName::TimestampQuery}) ||
+                                 !HasToggleEnabled("disable_timestamp_query_conversion"));
+    }
+
+    std::vector<wgpu::FeatureName> GetRequiredFeatures() override {
+        std::vector<wgpu::FeatureName> requiredFeatures = {};
+        if (SupportsFeatures({wgpu::FeatureName::TimestampQuery})) {
+            requiredFeatures.push_back(wgpu::FeatureName::TimestampQuery);
+        }
+        return requiredFeatures;
+    }
+};
+
+// Check that the timestamps got by timestamp query are between the two timestamps from
+// GetClockCalibration() after the timestamp conversion is disabled.
+TEST_P(D3D12GPUTimestampCalibrationTests, TimestampsInOrder) {
+    constexpr uint32_t kQueryCount = 2;
+
+    wgpu::QuerySetDescriptor querySetDescriptor;
+    querySetDescriptor.count = kQueryCount;
+    querySetDescriptor.type = wgpu::QueryType::Timestamp;
+    wgpu::QuerySet querySet = device.CreateQuerySet(&querySetDescriptor);
+
+    wgpu::BufferDescriptor bufferDescriptor;
+    bufferDescriptor.size = kQueryCount * sizeof(uint64_t);
+    bufferDescriptor.usage =
+        wgpu::BufferUsage::QueryResolve | wgpu::BufferUsage::CopySrc | wgpu::BufferUsage::CopyDst;
+    wgpu::Buffer destination = device.CreateBuffer(&bufferDescriptor);
+
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+    encoder.WriteTimestamp(querySet, 0);
+    encoder.WriteTimestamp(querySet, 1);
+    wgpu::CommandBuffer commands = encoder.Finish();
+
+    Device* d3DDevice = reinterpret_cast<Device*>(device.Get());
+    uint64_t gpuTimestamp0, gpuTimestamp1;
+    uint64_t cpuTimestamp0, cpuTimestamp1;
+    d3DDevice->GetCommandQueue()->GetClockCalibration(&gpuTimestamp0, &cpuTimestamp0);
+    queue.Submit(1, &commands);
+    WaitForAllOperations();
+    d3DDevice->GetCommandQueue()->GetClockCalibration(&gpuTimestamp1, &cpuTimestamp1);
+
+    // Separate resolve queryset to reduce the execution time of the queue with WriteTimestamp,
+    // so that the timestamp in the querySet will be closer to both gpuTimestamps from
+    // GetClockCalibration.
+    wgpu::CommandEncoder resolveEncoder = device.CreateCommandEncoder();
+    resolveEncoder.ResolveQuerySet(querySet, 0, kQueryCount, destination, 0);
+    wgpu::CommandBuffer resolveCommands = resolveEncoder.Finish();
+    queue.Submit(1, &resolveCommands);
+
+    EXPECT_BUFFER(destination, 0, kQueryCount * sizeof(uint64_t),
+                  new ExpectBetweenTimestamps(gpuTimestamp0, gpuTimestamp1));
+}
+
+DAWN_INSTANTIATE_TEST(D3D12GPUTimestampCalibrationTests,
+                      D3D12Backend({"disable_timestamp_query_conversion"}));
 
 }  // namespace dawn::native::d3d12
diff --git a/src/dawn/tests/white_box/D3D12ResourceHeapTests.cpp b/src/dawn/tests/white_box/D3D12ResourceHeapTests.cpp
index 911b2eb..d1a3ff0 100644
--- a/src/dawn/tests/white_box/D3D12ResourceHeapTests.cpp
+++ b/src/dawn/tests/white_box/D3D12ResourceHeapTests.cpp
@@ -20,91 +20,89 @@
 
 namespace dawn::native::d3d12 {
 
-    class D3D12ResourceHeapTests : public DawnTest {
-      protected:
-        void SetUp() override {
-            DawnTest::SetUp();
-            DAWN_TEST_UNSUPPORTED_IF(UsesWire());
-        }
-
-        std::vector<wgpu::FeatureName> GetRequiredFeatures() override {
-            mIsBCFormatSupported = SupportsFeatures({wgpu::FeatureName::TextureCompressionBC});
-            if (!mIsBCFormatSupported) {
-                return {};
-            }
-
-            return {wgpu::FeatureName::TextureCompressionBC};
-        }
-
-        bool IsBCFormatSupported() const {
-            return mIsBCFormatSupported;
-        }
-
-      private:
-        bool mIsBCFormatSupported = false;
-    };
-
-    // Verify that creating a small compressed textures will be 4KB aligned.
-    TEST_P(D3D12ResourceHeapTests, AlignSmallCompressedTexture) {
-        DAWN_TEST_UNSUPPORTED_IF(!IsBCFormatSupported());
-
-        // TODO(http://crbug.com/dawn/282): Investigate GPU/driver rejections of small alignment.
-        DAWN_SUPPRESS_TEST_IF(IsIntel() || IsNvidia() || IsWARP());
-
-        wgpu::TextureDescriptor descriptor;
-        descriptor.dimension = wgpu::TextureDimension::e2D;
-        descriptor.size.width = 8;
-        descriptor.size.height = 8;
-        descriptor.size.depthOrArrayLayers = 1;
-        descriptor.sampleCount = 1;
-        descriptor.format = wgpu::TextureFormat::BC1RGBAUnorm;
-        descriptor.mipLevelCount = 1;
-        descriptor.usage = wgpu::TextureUsage::TextureBinding;
-
-        // Create a smaller one that allows use of the smaller alignment.
-        wgpu::Texture texture = device.CreateTexture(&descriptor);
-        Texture* d3dTexture = reinterpret_cast<Texture*>(texture.Get());
-
-        EXPECT_EQ(d3dTexture->GetD3D12Resource()->GetDesc().Alignment,
-                  static_cast<uint64_t>(D3D12_SMALL_RESOURCE_PLACEMENT_ALIGNMENT));
-
-        // Create a larger one (>64KB) that forbids use the smaller alignment.
-        descriptor.size.width = 4096;
-        descriptor.size.height = 4096;
-
-        texture = device.CreateTexture(&descriptor);
-        d3dTexture = reinterpret_cast<Texture*>(texture.Get());
-
-        EXPECT_EQ(d3dTexture->GetD3D12Resource()->GetDesc().Alignment,
-                  static_cast<uint64_t>(D3D12_DEFAULT_RESOURCE_PLACEMENT_ALIGNMENT));
+class D3D12ResourceHeapTests : public DawnTest {
+  protected:
+    void SetUp() override {
+        DawnTest::SetUp();
+        DAWN_TEST_UNSUPPORTED_IF(UsesWire());
     }
 
-    // Verify creating a UBO will always be 256B aligned.
-    TEST_P(D3D12ResourceHeapTests, AlignUBO) {
-        // Create a small UBO
-        wgpu::BufferDescriptor descriptor;
-        descriptor.size = 4 * 1024;
-        descriptor.usage = wgpu::BufferUsage::Uniform;
+    std::vector<wgpu::FeatureName> GetRequiredFeatures() override {
+        mIsBCFormatSupported = SupportsFeatures({wgpu::FeatureName::TextureCompressionBC});
+        if (!mIsBCFormatSupported) {
+            return {};
+        }
 
-        wgpu::Buffer buffer = device.CreateBuffer(&descriptor);
-        Buffer* d3dBuffer = reinterpret_cast<Buffer*>(buffer.Get());
-
-        EXPECT_EQ((d3dBuffer->GetD3D12Resource()->GetDesc().Width %
-                   static_cast<uint64_t>(D3D12_CONSTANT_BUFFER_DATA_PLACEMENT_ALIGNMENT)),
-                  0u);
-
-        // Create a larger UBO
-        descriptor.size = (4 * 1024 * 1024) + 255;
-        descriptor.usage = wgpu::BufferUsage::Uniform;
-
-        buffer = device.CreateBuffer(&descriptor);
-        d3dBuffer = reinterpret_cast<Buffer*>(buffer.Get());
-
-        EXPECT_EQ((d3dBuffer->GetD3D12Resource()->GetDesc().Width %
-                   static_cast<uint64_t>(D3D12_CONSTANT_BUFFER_DATA_PLACEMENT_ALIGNMENT)),
-                  0u);
+        return {wgpu::FeatureName::TextureCompressionBC};
     }
 
-    DAWN_INSTANTIATE_TEST(D3D12ResourceHeapTests, D3D12Backend());
+    bool IsBCFormatSupported() const { return mIsBCFormatSupported; }
+
+  private:
+    bool mIsBCFormatSupported = false;
+};
+
+// Verify that creating a small compressed textures will be 4KB aligned.
+TEST_P(D3D12ResourceHeapTests, AlignSmallCompressedTexture) {
+    DAWN_TEST_UNSUPPORTED_IF(!IsBCFormatSupported());
+
+    // TODO(http://crbug.com/dawn/282): Investigate GPU/driver rejections of small alignment.
+    DAWN_SUPPRESS_TEST_IF(IsIntel() || IsNvidia() || IsWARP());
+
+    wgpu::TextureDescriptor descriptor;
+    descriptor.dimension = wgpu::TextureDimension::e2D;
+    descriptor.size.width = 8;
+    descriptor.size.height = 8;
+    descriptor.size.depthOrArrayLayers = 1;
+    descriptor.sampleCount = 1;
+    descriptor.format = wgpu::TextureFormat::BC1RGBAUnorm;
+    descriptor.mipLevelCount = 1;
+    descriptor.usage = wgpu::TextureUsage::TextureBinding;
+
+    // Create a smaller one that allows use of the smaller alignment.
+    wgpu::Texture texture = device.CreateTexture(&descriptor);
+    Texture* d3dTexture = reinterpret_cast<Texture*>(texture.Get());
+
+    EXPECT_EQ(d3dTexture->GetD3D12Resource()->GetDesc().Alignment,
+              static_cast<uint64_t>(D3D12_SMALL_RESOURCE_PLACEMENT_ALIGNMENT));
+
+    // Create a larger one (>64KB) that forbids use the smaller alignment.
+    descriptor.size.width = 4096;
+    descriptor.size.height = 4096;
+
+    texture = device.CreateTexture(&descriptor);
+    d3dTexture = reinterpret_cast<Texture*>(texture.Get());
+
+    EXPECT_EQ(d3dTexture->GetD3D12Resource()->GetDesc().Alignment,
+              static_cast<uint64_t>(D3D12_DEFAULT_RESOURCE_PLACEMENT_ALIGNMENT));
+}
+
+// Verify creating a UBO will always be 256B aligned.
+TEST_P(D3D12ResourceHeapTests, AlignUBO) {
+    // Create a small UBO
+    wgpu::BufferDescriptor descriptor;
+    descriptor.size = 4 * 1024;
+    descriptor.usage = wgpu::BufferUsage::Uniform;
+
+    wgpu::Buffer buffer = device.CreateBuffer(&descriptor);
+    Buffer* d3dBuffer = reinterpret_cast<Buffer*>(buffer.Get());
+
+    EXPECT_EQ((d3dBuffer->GetD3D12Resource()->GetDesc().Width %
+               static_cast<uint64_t>(D3D12_CONSTANT_BUFFER_DATA_PLACEMENT_ALIGNMENT)),
+              0u);
+
+    // Create a larger UBO
+    descriptor.size = (4 * 1024 * 1024) + 255;
+    descriptor.usage = wgpu::BufferUsage::Uniform;
+
+    buffer = device.CreateBuffer(&descriptor);
+    d3dBuffer = reinterpret_cast<Buffer*>(buffer.Get());
+
+    EXPECT_EQ((d3dBuffer->GetD3D12Resource()->GetDesc().Width %
+               static_cast<uint64_t>(D3D12_CONSTANT_BUFFER_DATA_PLACEMENT_ALIGNMENT)),
+              0u);
+}
+
+DAWN_INSTANTIATE_TEST(D3D12ResourceHeapTests, D3D12Backend());
 
 }  // namespace dawn::native::d3d12
diff --git a/src/dawn/tests/white_box/EGLImageWrappingTests.cpp b/src/dawn/tests/white_box/EGLImageWrappingTests.cpp
index ceee355..f20fda0 100644
--- a/src/dawn/tests/white_box/EGLImageWrappingTests.cpp
+++ b/src/dawn/tests/white_box/EGLImageWrappingTests.cpp
@@ -26,92 +26,87 @@
 
 namespace {
 
-    class EGLFunctions {
-      public:
-        EGLFunctions() {
+class EGLFunctions {
+  public:
+    EGLFunctions() {
 #ifdef DAWN_PLATFORM_WINDOWS
-            const char* eglLib = "libEGL.dll";
+        const char* eglLib = "libEGL.dll";
 #else
-            const char* eglLib = "libEGL.so";
+        const char* eglLib = "libEGL.so";
 #endif
-            EXPECT_TRUE(mlibEGL.Open(eglLib));
-            CreateImage = reinterpret_cast<PFNEGLCREATEIMAGEPROC>(LoadProc("eglCreateImage"));
-            DestroyImage = reinterpret_cast<PFNEGLDESTROYIMAGEPROC>(LoadProc("eglDestroyImage"));
-            GetCurrentContext =
-                reinterpret_cast<PFNEGLGETCURRENTCONTEXTPROC>(LoadProc("eglGetCurrentContext"));
-            GetCurrentDisplay =
-                reinterpret_cast<PFNEGLGETCURRENTDISPLAYPROC>(LoadProc("eglGetCurrentDisplay"));
+        EXPECT_TRUE(mlibEGL.Open(eglLib));
+        CreateImage = reinterpret_cast<PFNEGLCREATEIMAGEPROC>(LoadProc("eglCreateImage"));
+        DestroyImage = reinterpret_cast<PFNEGLDESTROYIMAGEPROC>(LoadProc("eglDestroyImage"));
+        GetCurrentContext =
+            reinterpret_cast<PFNEGLGETCURRENTCONTEXTPROC>(LoadProc("eglGetCurrentContext"));
+        GetCurrentDisplay =
+            reinterpret_cast<PFNEGLGETCURRENTDISPLAYPROC>(LoadProc("eglGetCurrentDisplay"));
+    }
+
+  private:
+    void* LoadProc(const char* name) {
+        void* proc = mlibEGL.GetProc(name);
+        EXPECT_NE(proc, nullptr);
+        return proc;
+    }
+
+  public:
+    PFNEGLCREATEIMAGEPROC CreateImage;
+    PFNEGLDESTROYIMAGEPROC DestroyImage;
+    PFNEGLGETCURRENTCONTEXTPROC GetCurrentContext;
+    PFNEGLGETCURRENTDISPLAYPROC GetCurrentDisplay;
+
+  private:
+    DynamicLib mlibEGL;
+};
+
+class ScopedEGLImage {
+  public:
+    ScopedEGLImage(PFNEGLDESTROYIMAGEPROC destroyImage,
+                   PFNGLDELETETEXTURESPROC deleteTextures,
+                   EGLDisplay display,
+                   EGLImage image,
+                   GLuint texture)
+        : mDestroyImage(destroyImage),
+          mDeleteTextures(deleteTextures),
+          mDisplay(display),
+          mImage(image),
+          mTexture(texture) {}
+
+    ScopedEGLImage(ScopedEGLImage&& other) {
+        if (mImage != nullptr) {
+            mDestroyImage(mDisplay, mImage);
         }
-
-      private:
-        void* LoadProc(const char* name) {
-            void* proc = mlibEGL.GetProc(name);
-            EXPECT_NE(proc, nullptr);
-            return proc;
+        if (mTexture != 0) {
+            mDeleteTextures(1, &mTexture);
         }
+        mDestroyImage = std::move(other.mDestroyImage);
+        mDeleteTextures = std::move(other.mDeleteTextures);
+        mDisplay = std::move(other.mDisplay);
+        mImage = std::move(other.mImage);
+        mTexture = std::move(other.mTexture);
+    }
 
-      public:
-        PFNEGLCREATEIMAGEPROC CreateImage;
-        PFNEGLDESTROYIMAGEPROC DestroyImage;
-        PFNEGLGETCURRENTCONTEXTPROC GetCurrentContext;
-        PFNEGLGETCURRENTDISPLAYPROC GetCurrentDisplay;
-
-      private:
-        DynamicLib mlibEGL;
-    };
-
-    class ScopedEGLImage {
-      public:
-        ScopedEGLImage(PFNEGLDESTROYIMAGEPROC destroyImage,
-                       PFNGLDELETETEXTURESPROC deleteTextures,
-                       EGLDisplay display,
-                       EGLImage image,
-                       GLuint texture)
-            : mDestroyImage(destroyImage),
-              mDeleteTextures(deleteTextures),
-              mDisplay(display),
-              mImage(image),
-              mTexture(texture) {
+    ~ScopedEGLImage() {
+        if (mTexture != 0) {
+            mDeleteTextures(1, &mTexture);
         }
-
-        ScopedEGLImage(ScopedEGLImage&& other) {
-            if (mImage != nullptr) {
-                mDestroyImage(mDisplay, mImage);
-            }
-            if (mTexture != 0) {
-                mDeleteTextures(1, &mTexture);
-            }
-            mDestroyImage = std::move(other.mDestroyImage);
-            mDeleteTextures = std::move(other.mDeleteTextures);
-            mDisplay = std::move(other.mDisplay);
-            mImage = std::move(other.mImage);
-            mTexture = std::move(other.mTexture);
+        if (mImage != nullptr) {
+            mDestroyImage(mDisplay, mImage);
         }
+    }
 
-        ~ScopedEGLImage() {
-            if (mTexture != 0) {
-                mDeleteTextures(1, &mTexture);
-            }
-            if (mImage != nullptr) {
-                mDestroyImage(mDisplay, mImage);
-            }
-        }
+    EGLImage getImage() const { return mImage; }
 
-        EGLImage getImage() const {
-            return mImage;
-        }
+    GLuint getTexture() const { return mTexture; }
 
-        GLuint getTexture() const {
-            return mTexture;
-        }
-
-      private:
-        PFNEGLDESTROYIMAGEPROC mDestroyImage = nullptr;
-        PFNGLDELETETEXTURESPROC mDeleteTextures = nullptr;
-        EGLDisplay mDisplay = nullptr;
-        EGLImage mImage = nullptr;
-        GLuint mTexture = 0;
-    };
+  private:
+    PFNEGLDESTROYIMAGEPROC mDestroyImage = nullptr;
+    PFNGLDELETETEXTURESPROC mDeleteTextures = nullptr;
+    EGLDisplay mDisplay = nullptr;
+    EGLImage mImage = nullptr;
+    GLuint mTexture = 0;
+};
 
 }  // anonymous namespace
 
diff --git a/src/dawn/tests/white_box/QueryInternalShaderTests.cpp b/src/dawn/tests/white_box/QueryInternalShaderTests.cpp
index ba429bc..619d6b5 100644
--- a/src/dawn/tests/white_box/QueryInternalShaderTests.cpp
+++ b/src/dawn/tests/white_box/QueryInternalShaderTests.cpp
@@ -22,62 +22,61 @@
 
 namespace {
 
-    void EncodeConvertTimestampsToNanoseconds(wgpu::CommandEncoder encoder,
-                                              wgpu::Buffer timestamps,
-                                              wgpu::Buffer availability,
-                                              wgpu::Buffer params) {
-        ASSERT_TRUE(
-            dawn::native::EncodeConvertTimestampsToNanoseconds(
-                dawn::native::FromAPI(encoder.Get()), dawn::native::FromAPI(timestamps.Get()),
-                dawn::native::FromAPI(availability.Get()), dawn::native::FromAPI(params.Get()))
-                .IsSuccess());
+void EncodeConvertTimestampsToNanoseconds(wgpu::CommandEncoder encoder,
+                                          wgpu::Buffer timestamps,
+                                          wgpu::Buffer availability,
+                                          wgpu::Buffer params) {
+    ASSERT_TRUE(dawn::native::EncodeConvertTimestampsToNanoseconds(
+                    dawn::native::FromAPI(encoder.Get()), dawn::native::FromAPI(timestamps.Get()),
+                    dawn::native::FromAPI(availability.Get()), dawn::native::FromAPI(params.Get()))
+                    .IsSuccess());
+}
+
+class InternalShaderExpectation : public detail::Expectation {
+  public:
+    ~InternalShaderExpectation() override = default;
+
+    InternalShaderExpectation(const uint64_t* values, const unsigned int count) {
+        mExpected.assign(values, values + count);
     }
 
-    class InternalShaderExpectation : public detail::Expectation {
-      public:
-        ~InternalShaderExpectation() override = default;
+    // Expect the actual results are approximately equal to the expected values.
+    testing::AssertionResult Check(const void* data, size_t size) override {
+        DAWN_ASSERT(size == sizeof(uint64_t) * mExpected.size());
+        // The computations in the shader use a multiplier that's a 16bit integer plus a shift
+        // that maximize the multiplier. This means that for the range of periods we care about
+        // (1 to 2^16-1 ns per tick), the high order bit of the multiplier will always be set.
+        // Intuitively this means that we have 15 bits of precision in the computation so we
+        // expect that for the error tolerance.
+        constexpr static float kErrorToleranceRatio = 1.0 / (1 << 15);  // about 3e-5.
 
-        InternalShaderExpectation(const uint64_t* values, const unsigned int count) {
-            mExpected.assign(values, values + count);
-        }
-
-        // Expect the actual results are approximately equal to the expected values.
-        testing::AssertionResult Check(const void* data, size_t size) override {
-            DAWN_ASSERT(size == sizeof(uint64_t) * mExpected.size());
-            // The computations in the shader use a multiplier that's a 16bit integer plus a shift
-            // that maximize the multiplier. This means that for the range of periods we care about
-            // (1 to 2^16-1 ns per tick), the high order bit of the multiplier will always be set.
-            // Intuitively this means that we have 15 bits of precision in the computation so we
-            // expect that for the error tolerance.
-            constexpr static float kErrorToleranceRatio = 1.0 / (1 << 15);  // about 3e-5.
-
-            const uint64_t* actual = static_cast<const uint64_t*>(data);
-            for (size_t i = 0; i < mExpected.size(); ++i) {
-                if (mExpected[i] == 0) {
-                    if (actual[i] != 0) {
-                        return testing::AssertionFailure()
-                               << "Expected data[" << i << "] to be 0, actual " << actual[i]
-                               << std::endl;
-                    }
-                    continue;
-                }
-
-                float errorRate = abs(static_cast<int64_t>(mExpected[i] - actual[i])) /
-                                  static_cast<float>(mExpected[i]);
-                if (errorRate > kErrorToleranceRatio) {
+        const uint64_t* actual = static_cast<const uint64_t*>(data);
+        for (size_t i = 0; i < mExpected.size(); ++i) {
+            if (mExpected[i] == 0) {
+                if (actual[i] != 0) {
                     return testing::AssertionFailure()
-                           << "Expected data[" << i << "] to be " << mExpected[i] << ", actual "
-                           << actual[i] << ". Error rate " << errorRate << " is larger than "
-                           << kErrorToleranceRatio << std::endl;
+                           << "Expected data[" << i << "] to be 0, actual " << actual[i]
+                           << std::endl;
                 }
+                continue;
             }
 
-            return testing::AssertionSuccess();
+            float errorRate = abs(static_cast<int64_t>(mExpected[i] - actual[i])) /
+                              static_cast<float>(mExpected[i]);
+            if (errorRate > kErrorToleranceRatio) {
+                return testing::AssertionFailure()
+                       << "Expected data[" << i << "] to be " << mExpected[i] << ", actual "
+                       << actual[i] << ". Error rate " << errorRate << " is larger than "
+                       << kErrorToleranceRatio << std::endl;
+            }
         }
 
-      private:
-        std::vector<uint64_t> mExpected;
-    };
+        return testing::AssertionSuccess();
+    }
+
+  private:
+    std::vector<uint64_t> mExpected;
+};
 
 }  // anonymous namespace
 
@@ -190,7 +189,7 @@
 //   Expect 0 for unavailable timestamps and nanoseconds for available timestamps in an expected
 //   error tolerance ratio.
 // - The availability buffer passes the data of which slot in timestamps buffer is an initialized
-//   timestamp.
+//   timestamp.
 // - The params buffer passes the timestamp count, the offset in timestamps buffer and the
 //   timestamp period (here use GPU frequency (HZ) on Intel D3D12 to calculate the period in
 //   ns for testing).
diff --git a/src/dawn/tests/white_box/VulkanErrorInjectorTests.cpp b/src/dawn/tests/white_box/VulkanErrorInjectorTests.cpp
index 1249973..8532c2a 100644
--- a/src/dawn/tests/white_box/VulkanErrorInjectorTests.cpp
+++ b/src/dawn/tests/white_box/VulkanErrorInjectorTests.cpp
@@ -23,18 +23,18 @@
 
 namespace {
 
-    class VulkanErrorInjectorTests : public DawnTest {
-      public:
-        void SetUp() override {
-            DawnTest::SetUp();
-            DAWN_TEST_UNSUPPORTED_IF(UsesWire());
+class VulkanErrorInjectorTests : public DawnTest {
+  public:
+    void SetUp() override {
+        DawnTest::SetUp();
+        DAWN_TEST_UNSUPPORTED_IF(UsesWire());
 
-            mDeviceVk = dawn::native::vulkan::ToBackend(dawn::native::FromAPI(device.Get()));
-        }
+        mDeviceVk = dawn::native::vulkan::ToBackend(dawn::native::FromAPI(device.Get()));
+    }
 
-      protected:
-        dawn::native::vulkan::Device* mDeviceVk;
-    };
+  protected:
+    dawn::native::vulkan::Device* mDeviceVk;
+};
 
 }  // anonymous namespace
 
diff --git a/src/dawn/tests/white_box/VulkanImageWrappingTests.cpp b/src/dawn/tests/white_box/VulkanImageWrappingTests.cpp
index 0a34e90..ca8acb8 100644
--- a/src/dawn/tests/white_box/VulkanImageWrappingTests.cpp
+++ b/src/dawn/tests/white_box/VulkanImageWrappingTests.cpp
@@ -24,805 +24,792 @@
 
 namespace dawn::native::vulkan {
 
-    using ExternalTexture = VulkanImageWrappingTestBackend::ExternalTexture;
-    using ExternalSemaphore = VulkanImageWrappingTestBackend::ExternalSemaphore;
+using ExternalTexture = VulkanImageWrappingTestBackend::ExternalTexture;
+using ExternalSemaphore = VulkanImageWrappingTestBackend::ExternalSemaphore;
 
-    namespace {
+namespace {
 
-        class VulkanImageWrappingTestBase : public DawnTest {
-          protected:
-            std::vector<wgpu::FeatureName> GetRequiredFeatures() override {
-                return {wgpu::FeatureName::DawnInternalUsages};
-            }
-
-          public:
-            void SetUp() override {
-                DawnTest::SetUp();
-                DAWN_TEST_UNSUPPORTED_IF(UsesWire());
-
-                mBackend = VulkanImageWrappingTestBackend::Create(device);
-
-                defaultDescriptor.dimension = wgpu::TextureDimension::e2D;
-                defaultDescriptor.format = wgpu::TextureFormat::RGBA8Unorm;
-                defaultDescriptor.size = {1, 1, 1};
-                defaultDescriptor.sampleCount = 1;
-                defaultDescriptor.mipLevelCount = 1;
-                defaultDescriptor.usage = wgpu::TextureUsage::RenderAttachment |
-                                          wgpu::TextureUsage::CopySrc | wgpu::TextureUsage::CopyDst;
-
-                defaultTexture = mBackend->CreateTexture(1, 1, defaultDescriptor.format,
-                                                         defaultDescriptor.usage);
-            }
-
-            void TearDown() override {
-                if (UsesWire()) {
-                    DawnTest::TearDown();
-                    return;
-                }
-
-                defaultTexture = nullptr;
-                mBackend = nullptr;
-                DawnTest::TearDown();
-            }
-
-            wgpu::Texture WrapVulkanImage(
-                wgpu::Device dawnDevice,
-                const wgpu::TextureDescriptor* textureDescriptor,
-                const ExternalTexture* externalTexture,
-                std::vector<std::unique_ptr<ExternalSemaphore>> semaphores,
-                bool isInitialized = true,
-                bool expectValid = true) {
-                ExternalImageDescriptorVkForTesting descriptor;
-                return WrapVulkanImage(dawnDevice, textureDescriptor, externalTexture,
-                                       std::move(semaphores), descriptor.releasedOldLayout,
-                                       descriptor.releasedNewLayout, isInitialized, expectValid);
-            }
-
-            wgpu::Texture WrapVulkanImage(
-                wgpu::Device dawnDevice,
-                const wgpu::TextureDescriptor* textureDescriptor,
-                const ExternalTexture* externalTexture,
-                std::vector<std::unique_ptr<ExternalSemaphore>> semaphores,
-                VkImageLayout releasedOldLayout,
-                VkImageLayout releasedNewLayout,
-                bool isInitialized = true,
-                bool expectValid = true) {
-                ExternalImageDescriptorVkForTesting descriptor;
-                descriptor.cTextureDescriptor =
-                    reinterpret_cast<const WGPUTextureDescriptor*>(textureDescriptor);
-                descriptor.isInitialized = isInitialized;
-                descriptor.releasedOldLayout = releasedOldLayout;
-                descriptor.releasedNewLayout = releasedNewLayout;
-
-                wgpu::Texture texture = mBackend->WrapImage(dawnDevice, externalTexture, descriptor,
-                                                            std::move(semaphores));
-
-                if (expectValid) {
-                    EXPECT_NE(texture, nullptr) << "Failed to wrap image, are external memory / "
-                                                   "semaphore extensions supported?";
-                } else {
-                    EXPECT_EQ(texture, nullptr);
-                }
-
-                return texture;
-            }
-
-            // Exports the signal from a wrapped texture and ignores it
-            // We have to export the signal before destroying the wrapped texture else it's an
-            // assertion failure
-            void IgnoreSignalSemaphore(wgpu::Texture wrappedTexture) {
-                ExternalImageExportInfoVkForTesting exportInfo;
-                bool result =
-                    mBackend->ExportImage(wrappedTexture, VK_IMAGE_LAYOUT_GENERAL, &exportInfo);
-                ASSERT(result);
-            }
-
-          protected:
-            std::unique_ptr<VulkanImageWrappingTestBackend> mBackend;
-
-            wgpu::TextureDescriptor defaultDescriptor;
-            std::unique_ptr<ExternalTexture> defaultTexture;
-        };
-
-    }  // namespace
-
-    using VulkanImageWrappingValidationTests = VulkanImageWrappingTestBase;
-
-    // Test no error occurs if the import is valid
-    TEST_P(VulkanImageWrappingValidationTests, SuccessfulImport) {
-        wgpu::Texture texture =
-            WrapVulkanImage(device, &defaultDescriptor, defaultTexture.get(), {}, true, true);
-        EXPECT_NE(texture.Get(), nullptr);
-        IgnoreSignalSemaphore(texture);
+class VulkanImageWrappingTestBase : public DawnTest {
+  protected:
+    std::vector<wgpu::FeatureName> GetRequiredFeatures() override {
+        return {wgpu::FeatureName::DawnInternalUsages};
     }
 
-    // Test no error occurs if the import is valid with DawnTextureInternalUsageDescriptor
-    TEST_P(VulkanImageWrappingValidationTests, SuccessfulImportWithInternalUsageDescriptor) {
-        wgpu::DawnTextureInternalUsageDescriptor internalDesc = {};
-        defaultDescriptor.nextInChain = &internalDesc;
-        internalDesc.internalUsage = wgpu::TextureUsage::CopySrc;
-        internalDesc.sType = wgpu::SType::DawnTextureInternalUsageDescriptor;
+  public:
+    void SetUp() override {
+        DawnTest::SetUp();
+        DAWN_TEST_UNSUPPORTED_IF(UsesWire());
 
-        wgpu::Texture texture =
-            WrapVulkanImage(device, &defaultDescriptor, defaultTexture.get(), {}, true, true);
-        EXPECT_NE(texture.Get(), nullptr);
-        IgnoreSignalSemaphore(texture);
+        mBackend = VulkanImageWrappingTestBackend::Create(device);
+
+        defaultDescriptor.dimension = wgpu::TextureDimension::e2D;
+        defaultDescriptor.format = wgpu::TextureFormat::RGBA8Unorm;
+        defaultDescriptor.size = {1, 1, 1};
+        defaultDescriptor.sampleCount = 1;
+        defaultDescriptor.mipLevelCount = 1;
+        defaultDescriptor.usage = wgpu::TextureUsage::RenderAttachment |
+                                  wgpu::TextureUsage::CopySrc | wgpu::TextureUsage::CopyDst;
+
+        defaultTexture =
+            mBackend->CreateTexture(1, 1, defaultDescriptor.format, defaultDescriptor.usage);
     }
 
-    // Test an error occurs if an invalid sType is the nextInChain
-    TEST_P(VulkanImageWrappingValidationTests, InvalidTextureDescriptor) {
-        wgpu::ChainedStruct chainedDescriptor;
-        chainedDescriptor.sType = wgpu::SType::SurfaceDescriptorFromWindowsSwapChainPanel;
-        defaultDescriptor.nextInChain = &chainedDescriptor;
-
-        ASSERT_DEVICE_ERROR(wgpu::Texture texture = WrapVulkanImage(
-                                device, &defaultDescriptor, defaultTexture.get(), {}, true, false));
-        EXPECT_EQ(texture.Get(), nullptr);
-    }
-
-    // Test an error occurs if the descriptor dimension isn't 2D
-    TEST_P(VulkanImageWrappingValidationTests, InvalidTextureDimension) {
-        defaultDescriptor.dimension = wgpu::TextureDimension::e1D;
-
-        ASSERT_DEVICE_ERROR(wgpu::Texture texture = WrapVulkanImage(
-                                device, &defaultDescriptor, defaultTexture.get(), {}, true, false));
-        EXPECT_EQ(texture.Get(), nullptr);
-    }
-
-    // Test an error occurs if the descriptor mip level count isn't 1
-    TEST_P(VulkanImageWrappingValidationTests, InvalidMipLevelCount) {
-        defaultDescriptor.mipLevelCount = 2;
-
-        ASSERT_DEVICE_ERROR(wgpu::Texture texture = WrapVulkanImage(
-                                device, &defaultDescriptor, defaultTexture.get(), {}, true, false));
-        EXPECT_EQ(texture.Get(), nullptr);
-    }
-
-    // Test an error occurs if the descriptor depth isn't 1
-    TEST_P(VulkanImageWrappingValidationTests, InvalidDepth) {
-        defaultDescriptor.size.depthOrArrayLayers = 2;
-
-        ASSERT_DEVICE_ERROR(wgpu::Texture texture = WrapVulkanImage(
-                                device, &defaultDescriptor, defaultTexture.get(), {}, true, false));
-        EXPECT_EQ(texture.Get(), nullptr);
-    }
-
-    // Test an error occurs if the descriptor sample count isn't 1
-    TEST_P(VulkanImageWrappingValidationTests, InvalidSampleCount) {
-        defaultDescriptor.sampleCount = 4;
-
-        ASSERT_DEVICE_ERROR(wgpu::Texture texture = WrapVulkanImage(
-                                device, &defaultDescriptor, defaultTexture.get(), {}, true, false));
-        EXPECT_EQ(texture.Get(), nullptr);
-    }
-
-    // Test an error occurs if we try to export the signal semaphore twice
-    TEST_P(VulkanImageWrappingValidationTests, DoubleSignalSemaphoreExport) {
-        wgpu::Texture texture =
-            WrapVulkanImage(device, &defaultDescriptor, defaultTexture.get(), {}, true, true);
-        ASSERT_NE(texture.Get(), nullptr);
-        IgnoreSignalSemaphore(texture);
-
-        ExternalImageExportInfoVkForTesting exportInfo;
-        ASSERT_DEVICE_ERROR(
-            bool success = mBackend->ExportImage(texture, VK_IMAGE_LAYOUT_GENERAL, &exportInfo));
-        ASSERT_FALSE(success);
-        ASSERT_EQ(exportInfo.semaphores.size(), 0u);
-    }
-
-    // Test an error occurs if we try to export the signal semaphore from a normal texture
-    TEST_P(VulkanImageWrappingValidationTests, NormalTextureSignalSemaphoreExport) {
-        wgpu::Texture texture = device.CreateTexture(&defaultDescriptor);
-        ASSERT_NE(texture.Get(), nullptr);
-
-        ExternalImageExportInfoVkForTesting exportInfo;
-        ASSERT_DEVICE_ERROR(
-            bool success = mBackend->ExportImage(texture, VK_IMAGE_LAYOUT_GENERAL, &exportInfo));
-        ASSERT_FALSE(success);
-        ASSERT_EQ(exportInfo.semaphores.size(), 0u);
-    }
-
-    // Test an error occurs if we try to export the signal semaphore from a destroyed texture
-    TEST_P(VulkanImageWrappingValidationTests, DestroyedTextureSignalSemaphoreExport) {
-        wgpu::Texture texture = device.CreateTexture(&defaultDescriptor);
-        ASSERT_NE(texture.Get(), nullptr);
-        texture.Destroy();
-
-        ExternalImageExportInfoVkForTesting exportInfo;
-        ASSERT_DEVICE_ERROR(
-            bool success = mBackend->ExportImage(texture, VK_IMAGE_LAYOUT_GENERAL, &exportInfo));
-        ASSERT_FALSE(success);
-        ASSERT_EQ(exportInfo.semaphores.size(), 0u);
-    }
-
-    // Fixture to test using external memory textures through different usages.
-    // These tests are skipped if the harness is using the wire.
-    class VulkanImageWrappingUsageTests : public VulkanImageWrappingTestBase {
-      public:
-        void SetUp() override {
-            VulkanImageWrappingTestBase::SetUp();
-            if (UsesWire()) {
-                return;
-            }
-
-            // Create another device based on the original
-            backendAdapter =
-                dawn::native::vulkan::ToBackend(dawn::native::FromAPI(device.Get())->GetAdapter());
-            deviceDescriptor.nextInChain = &togglesDesc;
-            togglesDesc.forceEnabledToggles = GetParam().forceEnabledWorkarounds.data();
-            togglesDesc.forceEnabledTogglesCount = GetParam().forceEnabledWorkarounds.size();
-            togglesDesc.forceDisabledToggles = GetParam().forceDisabledWorkarounds.data();
-            togglesDesc.forceDisabledTogglesCount = GetParam().forceDisabledWorkarounds.size();
-
-            secondDeviceVk =
-                dawn::native::vulkan::ToBackend(backendAdapter->APICreateDevice(&deviceDescriptor));
-            secondDevice = wgpu::Device::Acquire(dawn::native::ToAPI(secondDeviceVk));
+    void TearDown() override {
+        if (UsesWire()) {
+            DawnTest::TearDown();
+            return;
         }
 
-      protected:
-        dawn::native::vulkan::Adapter* backendAdapter;
-        dawn::native::DeviceDescriptor deviceDescriptor;
-        dawn::native::DawnTogglesDeviceDescriptor togglesDesc;
+        defaultTexture = nullptr;
+        mBackend = nullptr;
+        DawnTest::TearDown();
+    }
 
-        wgpu::Device secondDevice;
-        dawn::native::vulkan::Device* secondDeviceVk;
+    wgpu::Texture WrapVulkanImage(wgpu::Device dawnDevice,
+                                  const wgpu::TextureDescriptor* textureDescriptor,
+                                  const ExternalTexture* externalTexture,
+                                  std::vector<std::unique_ptr<ExternalSemaphore>> semaphores,
+                                  bool isInitialized = true,
+                                  bool expectValid = true) {
+        ExternalImageDescriptorVkForTesting descriptor;
+        return WrapVulkanImage(dawnDevice, textureDescriptor, externalTexture,
+                               std::move(semaphores), descriptor.releasedOldLayout,
+                               descriptor.releasedNewLayout, isInitialized, expectValid);
+    }
 
-        // Clear a texture on a given device
-        void ClearImage(wgpu::Device dawnDevice,
-                        wgpu::Texture wrappedTexture,
-                        wgpu::Color clearColor) {
-            wgpu::TextureView wrappedView = wrappedTexture.CreateView();
+    wgpu::Texture WrapVulkanImage(wgpu::Device dawnDevice,
+                                  const wgpu::TextureDescriptor* textureDescriptor,
+                                  const ExternalTexture* externalTexture,
+                                  std::vector<std::unique_ptr<ExternalSemaphore>> semaphores,
+                                  VkImageLayout releasedOldLayout,
+                                  VkImageLayout releasedNewLayout,
+                                  bool isInitialized = true,
+                                  bool expectValid = true) {
+        ExternalImageDescriptorVkForTesting descriptor;
+        descriptor.cTextureDescriptor =
+            reinterpret_cast<const WGPUTextureDescriptor*>(textureDescriptor);
+        descriptor.isInitialized = isInitialized;
+        descriptor.releasedOldLayout = releasedOldLayout;
+        descriptor.releasedNewLayout = releasedNewLayout;
 
-            // Submit a clear operation
-            utils::ComboRenderPassDescriptor renderPassDescriptor({wrappedView}, {});
-            renderPassDescriptor.cColorAttachments[0].clearValue = clearColor;
-            renderPassDescriptor.cColorAttachments[0].loadOp = wgpu::LoadOp::Clear;
+        wgpu::Texture texture =
+            mBackend->WrapImage(dawnDevice, externalTexture, descriptor, std::move(semaphores));
 
-            wgpu::CommandEncoder encoder = dawnDevice.CreateCommandEncoder();
-            wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPassDescriptor);
-            pass.End();
-
-            wgpu::CommandBuffer commands = encoder.Finish();
-
-            wgpu::Queue queue = dawnDevice.GetQueue();
-            queue.Submit(1, &commands);
+        if (expectValid) {
+            EXPECT_NE(texture, nullptr) << "Failed to wrap image, are external memory / "
+                                           "semaphore extensions supported?";
+        } else {
+            EXPECT_EQ(texture, nullptr);
         }
 
-        // Submits a 1x1x1 copy from source to destination
-        void SimpleCopyTextureToTexture(wgpu::Device dawnDevice,
-                                        wgpu::Queue dawnQueue,
-                                        wgpu::Texture source,
-                                        wgpu::Texture destination) {
-            wgpu::ImageCopyTexture copySrc = utils::CreateImageCopyTexture(source, 0, {0, 0, 0});
-            wgpu::ImageCopyTexture copyDst =
-                utils::CreateImageCopyTexture(destination, 0, {0, 0, 0});
+        return texture;
+    }
 
-            wgpu::Extent3D copySize = {1, 1, 1};
+    // Exports the signal from a wrapped texture and ignores it
+    // We have to export the signal before destroying the wrapped texture else it's an
+    // assertion failure
+    void IgnoreSignalSemaphore(wgpu::Texture wrappedTexture) {
+        ExternalImageExportInfoVkForTesting exportInfo;
+        bool result = mBackend->ExportImage(wrappedTexture, VK_IMAGE_LAYOUT_GENERAL, &exportInfo);
+        ASSERT(result);
+    }
 
-            wgpu::CommandEncoder encoder = dawnDevice.CreateCommandEncoder();
-            encoder.CopyTextureToTexture(&copySrc, &copyDst, &copySize);
-            wgpu::CommandBuffer commands = encoder.Finish();
+  protected:
+    std::unique_ptr<VulkanImageWrappingTestBackend> mBackend;
 
-            dawnQueue.Submit(1, &commands);
+    wgpu::TextureDescriptor defaultDescriptor;
+    std::unique_ptr<ExternalTexture> defaultTexture;
+};
+
+}  // namespace
+
+using VulkanImageWrappingValidationTests = VulkanImageWrappingTestBase;
+
+// Test no error occurs if the import is valid
+TEST_P(VulkanImageWrappingValidationTests, SuccessfulImport) {
+    wgpu::Texture texture =
+        WrapVulkanImage(device, &defaultDescriptor, defaultTexture.get(), {}, true, true);
+    EXPECT_NE(texture.Get(), nullptr);
+    IgnoreSignalSemaphore(texture);
+}
+
+// Test no error occurs if the import is valid with DawnTextureInternalUsageDescriptor
+TEST_P(VulkanImageWrappingValidationTests, SuccessfulImportWithInternalUsageDescriptor) {
+    wgpu::DawnTextureInternalUsageDescriptor internalDesc = {};
+    defaultDescriptor.nextInChain = &internalDesc;
+    internalDesc.internalUsage = wgpu::TextureUsage::CopySrc;
+    internalDesc.sType = wgpu::SType::DawnTextureInternalUsageDescriptor;
+
+    wgpu::Texture texture =
+        WrapVulkanImage(device, &defaultDescriptor, defaultTexture.get(), {}, true, true);
+    EXPECT_NE(texture.Get(), nullptr);
+    IgnoreSignalSemaphore(texture);
+}
+
+// Test an error occurs if an invalid sType is the nextInChain
+TEST_P(VulkanImageWrappingValidationTests, InvalidTextureDescriptor) {
+    wgpu::ChainedStruct chainedDescriptor;
+    chainedDescriptor.sType = wgpu::SType::SurfaceDescriptorFromWindowsSwapChainPanel;
+    defaultDescriptor.nextInChain = &chainedDescriptor;
+
+    ASSERT_DEVICE_ERROR(wgpu::Texture texture = WrapVulkanImage(
+                            device, &defaultDescriptor, defaultTexture.get(), {}, true, false));
+    EXPECT_EQ(texture.Get(), nullptr);
+}
+
+// Test an error occurs if the descriptor dimension isn't 2D
+TEST_P(VulkanImageWrappingValidationTests, InvalidTextureDimension) {
+    defaultDescriptor.dimension = wgpu::TextureDimension::e1D;
+
+    ASSERT_DEVICE_ERROR(wgpu::Texture texture = WrapVulkanImage(
+                            device, &defaultDescriptor, defaultTexture.get(), {}, true, false));
+    EXPECT_EQ(texture.Get(), nullptr);
+}
+
+// Test an error occurs if the descriptor mip level count isn't 1
+TEST_P(VulkanImageWrappingValidationTests, InvalidMipLevelCount) {
+    defaultDescriptor.mipLevelCount = 2;
+
+    ASSERT_DEVICE_ERROR(wgpu::Texture texture = WrapVulkanImage(
+                            device, &defaultDescriptor, defaultTexture.get(), {}, true, false));
+    EXPECT_EQ(texture.Get(), nullptr);
+}
+
+// Test an error occurs if the descriptor depth isn't 1
+TEST_P(VulkanImageWrappingValidationTests, InvalidDepth) {
+    defaultDescriptor.size.depthOrArrayLayers = 2;
+
+    ASSERT_DEVICE_ERROR(wgpu::Texture texture = WrapVulkanImage(
+                            device, &defaultDescriptor, defaultTexture.get(), {}, true, false));
+    EXPECT_EQ(texture.Get(), nullptr);
+}
+
+// Test an error occurs if the descriptor sample count isn't 1
+TEST_P(VulkanImageWrappingValidationTests, InvalidSampleCount) {
+    defaultDescriptor.sampleCount = 4;
+
+    ASSERT_DEVICE_ERROR(wgpu::Texture texture = WrapVulkanImage(
+                            device, &defaultDescriptor, defaultTexture.get(), {}, true, false));
+    EXPECT_EQ(texture.Get(), nullptr);
+}
+
+// Test an error occurs if we try to export the signal semaphore twice
+TEST_P(VulkanImageWrappingValidationTests, DoubleSignalSemaphoreExport) {
+    wgpu::Texture texture =
+        WrapVulkanImage(device, &defaultDescriptor, defaultTexture.get(), {}, true, true);
+    ASSERT_NE(texture.Get(), nullptr);
+    IgnoreSignalSemaphore(texture);
+
+    ExternalImageExportInfoVkForTesting exportInfo;
+    ASSERT_DEVICE_ERROR(bool success =
+                            mBackend->ExportImage(texture, VK_IMAGE_LAYOUT_GENERAL, &exportInfo));
+    ASSERT_FALSE(success);
+    ASSERT_EQ(exportInfo.semaphores.size(), 0u);
+}
+
+// Test an error occurs if we try to export the signal semaphore from a normal texture
+TEST_P(VulkanImageWrappingValidationTests, NormalTextureSignalSemaphoreExport) {
+    wgpu::Texture texture = device.CreateTexture(&defaultDescriptor);
+    ASSERT_NE(texture.Get(), nullptr);
+
+    ExternalImageExportInfoVkForTesting exportInfo;
+    ASSERT_DEVICE_ERROR(bool success =
+                            mBackend->ExportImage(texture, VK_IMAGE_LAYOUT_GENERAL, &exportInfo));
+    ASSERT_FALSE(success);
+    ASSERT_EQ(exportInfo.semaphores.size(), 0u);
+}
+
+// Test an error occurs if we try to export the signal semaphore from a destroyed texture
+TEST_P(VulkanImageWrappingValidationTests, DestroyedTextureSignalSemaphoreExport) {
+    wgpu::Texture texture = device.CreateTexture(&defaultDescriptor);
+    ASSERT_NE(texture.Get(), nullptr);
+    texture.Destroy();
+
+    ExternalImageExportInfoVkForTesting exportInfo;
+    ASSERT_DEVICE_ERROR(bool success =
+                            mBackend->ExportImage(texture, VK_IMAGE_LAYOUT_GENERAL, &exportInfo));
+    ASSERT_FALSE(success);
+    ASSERT_EQ(exportInfo.semaphores.size(), 0u);
+}
+
+// Fixture to test using external memory textures through different usages.
+// These tests are skipped if the harness is using the wire.
+class VulkanImageWrappingUsageTests : public VulkanImageWrappingTestBase {
+  public:
+    void SetUp() override {
+        VulkanImageWrappingTestBase::SetUp();
+        if (UsesWire()) {
+            return;
         }
-    };
 
-    // Clear an image in |secondDevice|
-    // Verify clear color is visible in |device|
-    TEST_P(VulkanImageWrappingUsageTests, ClearImageAcrossDevices) {
-        // Import the image on |secondDevice|
-        wgpu::Texture wrappedTexture =
-            WrapVulkanImage(secondDevice, &defaultDescriptor, defaultTexture.get(), {},
-                            VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL);
+        // Create another device based on the original
+        backendAdapter =
+            dawn::native::vulkan::ToBackend(dawn::native::FromAPI(device.Get())->GetAdapter());
+        deviceDescriptor.nextInChain = &togglesDesc;
+        togglesDesc.forceEnabledToggles = GetParam().forceEnabledWorkarounds.data();
+        togglesDesc.forceEnabledTogglesCount = GetParam().forceEnabledWorkarounds.size();
+        togglesDesc.forceDisabledToggles = GetParam().forceDisabledWorkarounds.data();
+        togglesDesc.forceDisabledTogglesCount = GetParam().forceDisabledWorkarounds.size();
 
-        // Clear |wrappedTexture| on |secondDevice|
-        ClearImage(secondDevice, wrappedTexture, {1 / 255.0f, 2 / 255.0f, 3 / 255.0f, 4 / 255.0f});
-
-        ExternalImageExportInfoVkForTesting exportInfo;
-        ASSERT_TRUE(mBackend->ExportImage(wrappedTexture, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
-                                          &exportInfo));
-
-        // Import the image to |device|, making sure we wait on signalFd
-        wgpu::Texture nextWrappedTexture = WrapVulkanImage(
-            device, &defaultDescriptor, defaultTexture.get(), std::move(exportInfo.semaphores),
-            exportInfo.releasedOldLayout, exportInfo.releasedNewLayout);
-
-        // Verify |device| sees the changes from |secondDevice|
-        EXPECT_PIXEL_RGBA8_EQ(RGBA8(1, 2, 3, 4), nextWrappedTexture, 0, 0);
-
-        IgnoreSignalSemaphore(nextWrappedTexture);
+        secondDeviceVk =
+            dawn::native::vulkan::ToBackend(backendAdapter->APICreateDevice(&deviceDescriptor));
+        secondDevice = wgpu::Device::Acquire(dawn::native::ToAPI(secondDeviceVk));
     }
 
-    // Clear an image in |secondDevice|
-    // Verify clear color is not visible in |device| if we import the texture as not cleared
-    TEST_P(VulkanImageWrappingUsageTests, UninitializedTextureIsCleared) {
-        // Import the image on |secondDevice|
-        wgpu::Texture wrappedTexture =
-            WrapVulkanImage(secondDevice, &defaultDescriptor, defaultTexture.get(), {},
-                            VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL);
+  protected:
+    dawn::native::vulkan::Adapter* backendAdapter;
+    dawn::native::DeviceDescriptor deviceDescriptor;
+    dawn::native::DawnTogglesDeviceDescriptor togglesDesc;
 
-        // Clear |wrappedTexture| on |secondDevice|
-        ClearImage(secondDevice, wrappedTexture, {1 / 255.0f, 2 / 255.0f, 3 / 255.0f, 4 / 255.0f});
+    wgpu::Device secondDevice;
+    dawn::native::vulkan::Device* secondDeviceVk;
 
-        ExternalImageExportInfoVkForTesting exportInfo;
-        ASSERT_TRUE(mBackend->ExportImage(wrappedTexture, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
-                                          &exportInfo));
+    // Clear a texture on a given device
+    void ClearImage(wgpu::Device dawnDevice, wgpu::Texture wrappedTexture, wgpu::Color clearColor) {
+        wgpu::TextureView wrappedView = wrappedTexture.CreateView();
 
-        // Import the image to |device|, making sure we wait on signalFd
-        wgpu::Texture nextWrappedTexture = WrapVulkanImage(
-            device, &defaultDescriptor, defaultTexture.get(), std::move(exportInfo.semaphores),
-            exportInfo.releasedOldLayout, exportInfo.releasedNewLayout, false);
+        // Submit a clear operation
+        utils::ComboRenderPassDescriptor renderPassDescriptor({wrappedView}, {});
+        renderPassDescriptor.cColorAttachments[0].clearValue = clearColor;
+        renderPassDescriptor.cColorAttachments[0].loadOp = wgpu::LoadOp::Clear;
 
-        // Verify |device| doesn't see the changes from |secondDevice|
-        EXPECT_PIXEL_RGBA8_EQ(RGBA8(0, 0, 0, 0), nextWrappedTexture, 0, 0);
+        wgpu::CommandEncoder encoder = dawnDevice.CreateCommandEncoder();
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPassDescriptor);
+        pass.End();
 
-        IgnoreSignalSemaphore(nextWrappedTexture);
-    }
-
-    // Import a texture into |secondDevice|
-    // Clear the texture on |secondDevice|
-    // Issue a copy of the imported texture inside |device| to |copyDstTexture|
-    // Verify the clear color from |secondDevice| is visible in |copyDstTexture|
-    TEST_P(VulkanImageWrappingUsageTests, CopyTextureToTextureSrcSync) {
-        // Import the image on |secondDevice|
-        wgpu::Texture wrappedTexture =
-            WrapVulkanImage(secondDevice, &defaultDescriptor, defaultTexture.get(), {},
-                            VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL);
-
-        // Clear |wrappedTexture| on |secondDevice|
-        ClearImage(secondDevice, wrappedTexture, {1 / 255.0f, 2 / 255.0f, 3 / 255.0f, 4 / 255.0f});
-
-        ExternalImageExportInfoVkForTesting exportInfo;
-        ASSERT_TRUE(mBackend->ExportImage(wrappedTexture, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
-                                          &exportInfo));
-
-        // Import the image to |device|, making sure we wait on |signalFd|
-        wgpu::Texture deviceWrappedTexture = WrapVulkanImage(
-            device, &defaultDescriptor, defaultTexture.get(), std::move(exportInfo.semaphores),
-            exportInfo.releasedOldLayout, exportInfo.releasedNewLayout);
-
-        // Create a second texture on |device|
-        wgpu::Texture copyDstTexture = device.CreateTexture(&defaultDescriptor);
-
-        // Copy |deviceWrappedTexture| into |copyDstTexture|
-        SimpleCopyTextureToTexture(device, queue, deviceWrappedTexture, copyDstTexture);
-
-        // Verify |copyDstTexture| sees changes from |secondDevice|
-        EXPECT_PIXEL_RGBA8_EQ(RGBA8(1, 2, 3, 4), copyDstTexture, 0, 0);
-
-        IgnoreSignalSemaphore(deviceWrappedTexture);
-    }
-
-    // Import a texture into |device|
-    // Clear texture with color A on |device|
-    // Import same texture into |secondDevice|, waiting on the copy signal
-    // Clear the new texture with color B on |secondDevice|
-    // Copy color B using Texture to Texture copy on |secondDevice|
-    // Import texture back into |device|, waiting on color B signal
-    // Verify texture contains color B
-    // If texture destination isn't synchronized, |secondDevice| could copy color B
-    // into the texture first, then |device| writes color A
-    TEST_P(VulkanImageWrappingUsageTests, CopyTextureToTextureDstSync) {
-        // Import the image on |device|
-        wgpu::Texture wrappedTexture =
-            WrapVulkanImage(device, &defaultDescriptor, defaultTexture.get(), {},
-                            VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL);
-
-        // Clear |wrappedTexture| on |device|
-        ClearImage(device, wrappedTexture, {5 / 255.0f, 6 / 255.0f, 7 / 255.0f, 8 / 255.0f});
-
-        ExternalImageExportInfoVkForTesting exportInfo;
-        ASSERT_TRUE(mBackend->ExportImage(wrappedTexture, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
-                                          &exportInfo));
-
-        // Import the image to |secondDevice|, making sure we wait on |signalFd|
-        wgpu::Texture secondDeviceWrappedTexture =
-            WrapVulkanImage(secondDevice, &defaultDescriptor, defaultTexture.get(),
-                            std::move(exportInfo.semaphores), exportInfo.releasedOldLayout,
-                            exportInfo.releasedNewLayout);
-
-        // Create a texture with color B on |secondDevice|
-        wgpu::Texture copySrcTexture = secondDevice.CreateTexture(&defaultDescriptor);
-        ClearImage(secondDevice, copySrcTexture, {1 / 255.0f, 2 / 255.0f, 3 / 255.0f, 4 / 255.0f});
-
-        // Copy color B on |secondDevice|
-        wgpu::Queue secondDeviceQueue = secondDevice.GetQueue();
-        SimpleCopyTextureToTexture(secondDevice, secondDeviceQueue, copySrcTexture,
-                                   secondDeviceWrappedTexture);
-
-        // Re-import back into |device|, waiting on |secondDevice|'s signal
-        ExternalImageExportInfoVkForTesting secondExportInfo;
-        ASSERT_TRUE(mBackend->ExportImage(secondDeviceWrappedTexture,
-                                          VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, &secondExportInfo));
-
-        wgpu::Texture nextWrappedTexture =
-            WrapVulkanImage(device, &defaultDescriptor, defaultTexture.get(),
-                            std::move(secondExportInfo.semaphores),
-                            secondExportInfo.releasedOldLayout, secondExportInfo.releasedNewLayout);
-
-        // Verify |nextWrappedTexture| contains the color from our copy
-        EXPECT_PIXEL_RGBA8_EQ(RGBA8(1, 2, 3, 4), nextWrappedTexture, 0, 0);
-
-        IgnoreSignalSemaphore(nextWrappedTexture);
-    }
-
-    // Import a texture from |secondDevice|
-    // Clear the texture on |secondDevice|
-    // Issue a copy of the imported texture inside |device| to |copyDstBuffer|
-    // Verify the clear color from |secondDevice| is visible in |copyDstBuffer|
-    TEST_P(VulkanImageWrappingUsageTests, CopyTextureToBufferSrcSync) {
-        // Import the image on |secondDevice|
-        wgpu::Texture wrappedTexture =
-            WrapVulkanImage(secondDevice, &defaultDescriptor, defaultTexture.get(), {},
-                            VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL);
-
-        // Clear |wrappedTexture| on |secondDevice|
-        ClearImage(secondDevice, wrappedTexture, {1 / 255.0f, 2 / 255.0f, 3 / 255.0f, 4 / 255.0f});
-
-        ExternalImageExportInfoVkForTesting exportInfo;
-        ASSERT_TRUE(mBackend->ExportImage(wrappedTexture, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
-                                          &exportInfo));
-
-        // Import the image to |device|, making sure we wait on |signalFd|
-        wgpu::Texture deviceWrappedTexture = WrapVulkanImage(
-            device, &defaultDescriptor, defaultTexture.get(), std::move(exportInfo.semaphores),
-            exportInfo.releasedOldLayout, exportInfo.releasedNewLayout);
-
-        // Create a destination buffer on |device|
-        wgpu::BufferDescriptor bufferDesc;
-        bufferDesc.size = 4;
-        bufferDesc.usage = wgpu::BufferUsage::CopyDst | wgpu::BufferUsage::CopySrc;
-        wgpu::Buffer copyDstBuffer = device.CreateBuffer(&bufferDesc);
-
-        // Copy |deviceWrappedTexture| into |copyDstBuffer|
-        wgpu::ImageCopyTexture copySrc =
-            utils::CreateImageCopyTexture(deviceWrappedTexture, 0, {0, 0, 0});
-        wgpu::ImageCopyBuffer copyDst = utils::CreateImageCopyBuffer(copyDstBuffer, 0, 256);
-
-        wgpu::Extent3D copySize = {1, 1, 1};
-
-        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-        encoder.CopyTextureToBuffer(&copySrc, &copyDst, &copySize);
         wgpu::CommandBuffer commands = encoder.Finish();
+
+        wgpu::Queue queue = dawnDevice.GetQueue();
         queue.Submit(1, &commands);
-
-        // Verify |copyDstBuffer| sees changes from |secondDevice|
-        uint32_t expected = 0x04030201;
-        EXPECT_BUFFER_U32_EQ(expected, copyDstBuffer, 0);
-
-        IgnoreSignalSemaphore(deviceWrappedTexture);
     }
 
-    // Import a texture into |device|
-    // Clear texture with color A on |device|
-    // Import same texture into |secondDevice|, waiting on the copy signal
-    // Copy color B using Buffer to Texture copy on |secondDevice|
-    // Import texture back into |device|, waiting on color B signal
-    // Verify texture contains color B
-    // If texture destination isn't synchronized, |secondDevice| could copy color B
-    // into the texture first, then |device| writes color A
-    TEST_P(VulkanImageWrappingUsageTests, CopyBufferToTextureDstSync) {
-        // Import the image on |device|
-        wgpu::Texture wrappedTexture =
-            WrapVulkanImage(device, &defaultDescriptor, defaultTexture.get(), {},
-                            VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL);
-
-        // Clear |wrappedTexture| on |device|
-        ClearImage(device, wrappedTexture, {5 / 255.0f, 6 / 255.0f, 7 / 255.0f, 8 / 255.0f});
-
-        ExternalImageExportInfoVkForTesting exportInfo;
-        ASSERT_TRUE(mBackend->ExportImage(wrappedTexture, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
-                                          &exportInfo));
-
-        // Import the image to |secondDevice|, making sure we wait on |signalFd|
-        wgpu::Texture secondDeviceWrappedTexture =
-            WrapVulkanImage(secondDevice, &defaultDescriptor, defaultTexture.get(),
-                            std::move(exportInfo.semaphores), exportInfo.releasedOldLayout,
-                            exportInfo.releasedNewLayout);
-
-        // Copy color B on |secondDevice|
-        wgpu::Queue secondDeviceQueue = secondDevice.GetQueue();
-
-        // Create a buffer on |secondDevice|
-        wgpu::Buffer copySrcBuffer =
-            utils::CreateBufferFromData(secondDevice, wgpu::BufferUsage::CopySrc, {0x04030201});
-
-        // Copy |copySrcBuffer| into |secondDeviceWrappedTexture|
-        wgpu::ImageCopyBuffer copySrc = utils::CreateImageCopyBuffer(copySrcBuffer, 0, 256);
-        wgpu::ImageCopyTexture copyDst =
-            utils::CreateImageCopyTexture(secondDeviceWrappedTexture, 0, {0, 0, 0});
+    // Submits a 1x1x1 copy from source to destination
+    void SimpleCopyTextureToTexture(wgpu::Device dawnDevice,
+                                    wgpu::Queue dawnQueue,
+                                    wgpu::Texture source,
+                                    wgpu::Texture destination) {
+        wgpu::ImageCopyTexture copySrc = utils::CreateImageCopyTexture(source, 0, {0, 0, 0});
+        wgpu::ImageCopyTexture copyDst = utils::CreateImageCopyTexture(destination, 0, {0, 0, 0});
 
         wgpu::Extent3D copySize = {1, 1, 1};
 
+        wgpu::CommandEncoder encoder = dawnDevice.CreateCommandEncoder();
+        encoder.CopyTextureToTexture(&copySrc, &copyDst, &copySize);
+        wgpu::CommandBuffer commands = encoder.Finish();
+
+        dawnQueue.Submit(1, &commands);
+    }
+};
+
+// Clear an image in |secondDevice|
+// Verify clear color is visible in |device|
+TEST_P(VulkanImageWrappingUsageTests, ClearImageAcrossDevices) {
+    // Import the image on |secondDevice|
+    wgpu::Texture wrappedTexture =
+        WrapVulkanImage(secondDevice, &defaultDescriptor, defaultTexture.get(), {},
+                        VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL);
+
+    // Clear |wrappedTexture| on |secondDevice|
+    ClearImage(secondDevice, wrappedTexture, {1 / 255.0f, 2 / 255.0f, 3 / 255.0f, 4 / 255.0f});
+
+    ExternalImageExportInfoVkForTesting exportInfo;
+    ASSERT_TRUE(
+        mBackend->ExportImage(wrappedTexture, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, &exportInfo));
+
+    // Import the image to |device|, making sure we wait on signalFd
+    wgpu::Texture nextWrappedTexture = WrapVulkanImage(
+        device, &defaultDescriptor, defaultTexture.get(), std::move(exportInfo.semaphores),
+        exportInfo.releasedOldLayout, exportInfo.releasedNewLayout);
+
+    // Verify |device| sees the changes from |secondDevice|
+    EXPECT_PIXEL_RGBA8_EQ(RGBA8(1, 2, 3, 4), nextWrappedTexture, 0, 0);
+
+    IgnoreSignalSemaphore(nextWrappedTexture);
+}
+
+// Clear an image in |secondDevice|
+// Verify clear color is not visible in |device| if we import the texture as not cleared
+TEST_P(VulkanImageWrappingUsageTests, UninitializedTextureIsCleared) {
+    // Import the image on |secondDevice|
+    wgpu::Texture wrappedTexture =
+        WrapVulkanImage(secondDevice, &defaultDescriptor, defaultTexture.get(), {},
+                        VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL);
+
+    // Clear |wrappedTexture| on |secondDevice|
+    ClearImage(secondDevice, wrappedTexture, {1 / 255.0f, 2 / 255.0f, 3 / 255.0f, 4 / 255.0f});
+
+    ExternalImageExportInfoVkForTesting exportInfo;
+    ASSERT_TRUE(
+        mBackend->ExportImage(wrappedTexture, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, &exportInfo));
+
+    // Import the image to |device|, making sure we wait on signalFd
+    wgpu::Texture nextWrappedTexture = WrapVulkanImage(
+        device, &defaultDescriptor, defaultTexture.get(), std::move(exportInfo.semaphores),
+        exportInfo.releasedOldLayout, exportInfo.releasedNewLayout, false);
+
+    // Verify |device| doesn't see the changes from |secondDevice|
+    EXPECT_PIXEL_RGBA8_EQ(RGBA8(0, 0, 0, 0), nextWrappedTexture, 0, 0);
+
+    IgnoreSignalSemaphore(nextWrappedTexture);
+}
+
+// Import a texture into |secondDevice|
+// Clear the texture on |secondDevice|
+// Issue a copy of the imported texture inside |device| to |copyDstTexture|
+// Verify the clear color from |secondDevice| is visible in |copyDstTexture|
+TEST_P(VulkanImageWrappingUsageTests, CopyTextureToTextureSrcSync) {
+    // Import the image on |secondDevice|
+    wgpu::Texture wrappedTexture =
+        WrapVulkanImage(secondDevice, &defaultDescriptor, defaultTexture.get(), {},
+                        VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL);
+
+    // Clear |wrappedTexture| on |secondDevice|
+    ClearImage(secondDevice, wrappedTexture, {1 / 255.0f, 2 / 255.0f, 3 / 255.0f, 4 / 255.0f});
+
+    ExternalImageExportInfoVkForTesting exportInfo;
+    ASSERT_TRUE(
+        mBackend->ExportImage(wrappedTexture, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, &exportInfo));
+
+    // Import the image to |device|, making sure we wait on |signalFd|
+    wgpu::Texture deviceWrappedTexture = WrapVulkanImage(
+        device, &defaultDescriptor, defaultTexture.get(), std::move(exportInfo.semaphores),
+        exportInfo.releasedOldLayout, exportInfo.releasedNewLayout);
+
+    // Create a second texture on |device|
+    wgpu::Texture copyDstTexture = device.CreateTexture(&defaultDescriptor);
+
+    // Copy |deviceWrappedTexture| into |copyDstTexture|
+    SimpleCopyTextureToTexture(device, queue, deviceWrappedTexture, copyDstTexture);
+
+    // Verify |copyDstTexture| sees changes from |secondDevice|
+    EXPECT_PIXEL_RGBA8_EQ(RGBA8(1, 2, 3, 4), copyDstTexture, 0, 0);
+
+    IgnoreSignalSemaphore(deviceWrappedTexture);
+}
+
+// Import a texture into |device|
+// Clear texture with color A on |device|
+// Import same texture into |secondDevice|, waiting on the copy signal
+// Clear the new texture with color B on |secondDevice|
+// Copy color B using Texture to Texture copy on |secondDevice|
+// Import texture back into |device|, waiting on color B signal
+// Verify texture contains color B
+// If texture destination isn't synchronized, |secondDevice| could copy color B
+// into the texture first, then |device| writes color A
+TEST_P(VulkanImageWrappingUsageTests, CopyTextureToTextureDstSync) {
+    // Import the image on |device|
+    wgpu::Texture wrappedTexture =
+        WrapVulkanImage(device, &defaultDescriptor, defaultTexture.get(), {},
+                        VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL);
+
+    // Clear |wrappedTexture| on |device|
+    ClearImage(device, wrappedTexture, {5 / 255.0f, 6 / 255.0f, 7 / 255.0f, 8 / 255.0f});
+
+    ExternalImageExportInfoVkForTesting exportInfo;
+    ASSERT_TRUE(
+        mBackend->ExportImage(wrappedTexture, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, &exportInfo));
+
+    // Import the image to |secondDevice|, making sure we wait on |signalFd|
+    wgpu::Texture secondDeviceWrappedTexture = WrapVulkanImage(
+        secondDevice, &defaultDescriptor, defaultTexture.get(), std::move(exportInfo.semaphores),
+        exportInfo.releasedOldLayout, exportInfo.releasedNewLayout);
+
+    // Create a texture with color B on |secondDevice|
+    wgpu::Texture copySrcTexture = secondDevice.CreateTexture(&defaultDescriptor);
+    ClearImage(secondDevice, copySrcTexture, {1 / 255.0f, 2 / 255.0f, 3 / 255.0f, 4 / 255.0f});
+
+    // Copy color B on |secondDevice|
+    wgpu::Queue secondDeviceQueue = secondDevice.GetQueue();
+    SimpleCopyTextureToTexture(secondDevice, secondDeviceQueue, copySrcTexture,
+                               secondDeviceWrappedTexture);
+
+    // Re-import back into |device|, waiting on |secondDevice|'s signal
+    ExternalImageExportInfoVkForTesting secondExportInfo;
+    ASSERT_TRUE(mBackend->ExportImage(secondDeviceWrappedTexture,
+                                      VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, &secondExportInfo));
+
+    wgpu::Texture nextWrappedTexture = WrapVulkanImage(
+        device, &defaultDescriptor, defaultTexture.get(), std::move(secondExportInfo.semaphores),
+        secondExportInfo.releasedOldLayout, secondExportInfo.releasedNewLayout);
+
+    // Verify |nextWrappedTexture| contains the color from our copy
+    EXPECT_PIXEL_RGBA8_EQ(RGBA8(1, 2, 3, 4), nextWrappedTexture, 0, 0);
+
+    IgnoreSignalSemaphore(nextWrappedTexture);
+}
+
+// Import a texture from |secondDevice|
+// Clear the texture on |secondDevice|
+// Issue a copy of the imported texture inside |device| to |copyDstBuffer|
+// Verify the clear color from |secondDevice| is visible in |copyDstBuffer|
+TEST_P(VulkanImageWrappingUsageTests, CopyTextureToBufferSrcSync) {
+    // Import the image on |secondDevice|
+    wgpu::Texture wrappedTexture =
+        WrapVulkanImage(secondDevice, &defaultDescriptor, defaultTexture.get(), {},
+                        VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL);
+
+    // Clear |wrappedTexture| on |secondDevice|
+    ClearImage(secondDevice, wrappedTexture, {1 / 255.0f, 2 / 255.0f, 3 / 255.0f, 4 / 255.0f});
+
+    ExternalImageExportInfoVkForTesting exportInfo;
+    ASSERT_TRUE(
+        mBackend->ExportImage(wrappedTexture, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, &exportInfo));
+
+    // Import the image to |device|, making sure we wait on |signalFd|
+    wgpu::Texture deviceWrappedTexture = WrapVulkanImage(
+        device, &defaultDescriptor, defaultTexture.get(), std::move(exportInfo.semaphores),
+        exportInfo.releasedOldLayout, exportInfo.releasedNewLayout);
+
+    // Create a destination buffer on |device|
+    wgpu::BufferDescriptor bufferDesc;
+    bufferDesc.size = 4;
+    bufferDesc.usage = wgpu::BufferUsage::CopyDst | wgpu::BufferUsage::CopySrc;
+    wgpu::Buffer copyDstBuffer = device.CreateBuffer(&bufferDesc);
+
+    // Copy |deviceWrappedTexture| into |copyDstBuffer|
+    wgpu::ImageCopyTexture copySrc =
+        utils::CreateImageCopyTexture(deviceWrappedTexture, 0, {0, 0, 0});
+    wgpu::ImageCopyBuffer copyDst = utils::CreateImageCopyBuffer(copyDstBuffer, 0, 256);
+
+    wgpu::Extent3D copySize = {1, 1, 1};
+
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+    encoder.CopyTextureToBuffer(&copySrc, &copyDst, &copySize);
+    wgpu::CommandBuffer commands = encoder.Finish();
+    queue.Submit(1, &commands);
+
+    // Verify |copyDstBuffer| sees changes from |secondDevice|
+    uint32_t expected = 0x04030201;
+    EXPECT_BUFFER_U32_EQ(expected, copyDstBuffer, 0);
+
+    IgnoreSignalSemaphore(deviceWrappedTexture);
+}
+
+// Import a texture into |device|
+// Clear texture with color A on |device|
+// Import same texture into |secondDevice|, waiting on the copy signal
+// Copy color B using Buffer to Texture copy on |secondDevice|
+// Import texture back into |device|, waiting on color B signal
+// Verify texture contains color B
+// If texture destination isn't synchronized, |secondDevice| could copy color B
+// into the texture first, then |device| writes color A
+TEST_P(VulkanImageWrappingUsageTests, CopyBufferToTextureDstSync) {
+    // Import the image on |device|
+    wgpu::Texture wrappedTexture =
+        WrapVulkanImage(device, &defaultDescriptor, defaultTexture.get(), {},
+                        VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL);
+
+    // Clear |wrappedTexture| on |device|
+    ClearImage(device, wrappedTexture, {5 / 255.0f, 6 / 255.0f, 7 / 255.0f, 8 / 255.0f});
+
+    ExternalImageExportInfoVkForTesting exportInfo;
+    ASSERT_TRUE(
+        mBackend->ExportImage(wrappedTexture, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, &exportInfo));
+
+    // Import the image to |secondDevice|, making sure we wait on |signalFd|
+    wgpu::Texture secondDeviceWrappedTexture = WrapVulkanImage(
+        secondDevice, &defaultDescriptor, defaultTexture.get(), std::move(exportInfo.semaphores),
+        exportInfo.releasedOldLayout, exportInfo.releasedNewLayout);
+
+    // Copy color B on |secondDevice|
+    wgpu::Queue secondDeviceQueue = secondDevice.GetQueue();
+
+    // Create a buffer on |secondDevice|
+    wgpu::Buffer copySrcBuffer =
+        utils::CreateBufferFromData(secondDevice, wgpu::BufferUsage::CopySrc, {0x04030201});
+
+    // Copy |copySrcBuffer| into |secondDeviceWrappedTexture|
+    wgpu::ImageCopyBuffer copySrc = utils::CreateImageCopyBuffer(copySrcBuffer, 0, 256);
+    wgpu::ImageCopyTexture copyDst =
+        utils::CreateImageCopyTexture(secondDeviceWrappedTexture, 0, {0, 0, 0});
+
+    wgpu::Extent3D copySize = {1, 1, 1};
+
+    wgpu::CommandEncoder encoder = secondDevice.CreateCommandEncoder();
+    encoder.CopyBufferToTexture(&copySrc, &copyDst, &copySize);
+    wgpu::CommandBuffer commands = encoder.Finish();
+    secondDeviceQueue.Submit(1, &commands);
+
+    // Re-import back into |device|, waiting on |secondDevice|'s signal
+    ExternalImageExportInfoVkForTesting secondExportInfo;
+    ASSERT_TRUE(mBackend->ExportImage(secondDeviceWrappedTexture,
+                                      VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, &secondExportInfo));
+
+    wgpu::Texture nextWrappedTexture = WrapVulkanImage(
+        device, &defaultDescriptor, defaultTexture.get(), std::move(secondExportInfo.semaphores),
+        secondExportInfo.releasedOldLayout, secondExportInfo.releasedNewLayout);
+
+    // Verify |nextWrappedTexture| contains the color from our copy
+    EXPECT_PIXEL_RGBA8_EQ(RGBA8(1, 2, 3, 4), nextWrappedTexture, 0, 0);
+
+    IgnoreSignalSemaphore(nextWrappedTexture);
+}
+
+// Import a texture from |secondDevice|
+// Clear the texture on |secondDevice|
+// Issue a copy of the imported texture inside |device| to |copyDstTexture|
+// Issue second copy to |secondCopyDstTexture|
+// Verify the clear color from |secondDevice| is visible in both copies
+TEST_P(VulkanImageWrappingUsageTests, DoubleTextureUsage) {
+    // Import the image on |secondDevice|
+    wgpu::Texture wrappedTexture =
+        WrapVulkanImage(secondDevice, &defaultDescriptor, defaultTexture.get(), {},
+                        VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL);
+
+    // Clear |wrappedTexture| on |secondDevice|
+    ClearImage(secondDevice, wrappedTexture, {1 / 255.0f, 2 / 255.0f, 3 / 255.0f, 4 / 255.0f});
+
+    ExternalImageExportInfoVkForTesting exportInfo;
+    ASSERT_TRUE(
+        mBackend->ExportImage(wrappedTexture, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, &exportInfo));
+
+    // Import the image to |device|, making sure we wait on |signalFd|
+    wgpu::Texture deviceWrappedTexture = WrapVulkanImage(
+        device, &defaultDescriptor, defaultTexture.get(), std::move(exportInfo.semaphores),
+        exportInfo.releasedOldLayout, exportInfo.releasedNewLayout);
+
+    // Create a second texture on |device|
+    wgpu::Texture copyDstTexture = device.CreateTexture(&defaultDescriptor);
+
+    // Create a third texture on |device|
+    wgpu::Texture secondCopyDstTexture = device.CreateTexture(&defaultDescriptor);
+
+    // Copy |deviceWrappedTexture| into |copyDstTexture|
+    SimpleCopyTextureToTexture(device, queue, deviceWrappedTexture, copyDstTexture);
+
+    // Copy |deviceWrappedTexture| into |secondCopyDstTexture|
+    SimpleCopyTextureToTexture(device, queue, deviceWrappedTexture, secondCopyDstTexture);
+
+    // Verify |copyDstTexture| sees changes from |secondDevice|
+    EXPECT_PIXEL_RGBA8_EQ(RGBA8(1, 2, 3, 4), copyDstTexture, 0, 0);
+
+    // Verify |secondCopyDstTexture| sees changes from |secondDevice|
+    EXPECT_PIXEL_RGBA8_EQ(RGBA8(1, 2, 3, 4), secondCopyDstTexture, 0, 0);
+
+    IgnoreSignalSemaphore(deviceWrappedTexture);
+}
+
+// Tex A on device 3 (external export)
+// Tex B on device 2 (external export)
+// Tex C on device 1 (external export)
+// Clear color for A on device 3
+// Copy A->B on device 3
+// Copy B->C on device 2 (wait on B from previous op)
+// Copy C->D on device 1 (wait on C from previous op)
+// Verify D has same color as A
+TEST_P(VulkanImageWrappingUsageTests, ChainTextureCopy) {
+    // device 1 = |device|
+    // device 2 = |secondDevice|
+    // Create device 3
+    dawn::native::vulkan::Device* thirdDeviceVk =
+        dawn::native::vulkan::ToBackend(backendAdapter->APICreateDevice(&deviceDescriptor));
+    wgpu::Device thirdDevice = wgpu::Device::Acquire(dawn::native::ToAPI(thirdDeviceVk));
+
+    // Make queue for device 2 and 3
+    wgpu::Queue secondDeviceQueue = secondDevice.GetQueue();
+    wgpu::Queue thirdDeviceQueue = thirdDevice.GetQueue();
+
+    // Create textures A, B, C
+    std::unique_ptr<ExternalTexture> textureA =
+        mBackend->CreateTexture(1, 1, wgpu::TextureFormat::RGBA8Unorm, defaultDescriptor.usage);
+    std::unique_ptr<ExternalTexture> textureB =
+        mBackend->CreateTexture(1, 1, wgpu::TextureFormat::RGBA8Unorm, defaultDescriptor.usage);
+    std::unique_ptr<ExternalTexture> textureC =
+        mBackend->CreateTexture(1, 1, wgpu::TextureFormat::RGBA8Unorm, defaultDescriptor.usage);
+
+    // Import TexA, TexB on device 3
+    wgpu::Texture wrappedTexADevice3 =
+        WrapVulkanImage(thirdDevice, &defaultDescriptor, textureA.get(), {},
+                        VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL);
+
+    wgpu::Texture wrappedTexBDevice3 =
+        WrapVulkanImage(thirdDevice, &defaultDescriptor, textureB.get(), {},
+                        VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);
+
+    // Clear TexA
+    ClearImage(thirdDevice, wrappedTexADevice3, {1 / 255.0f, 2 / 255.0f, 3 / 255.0f, 4 / 255.0f});
+
+    // Copy A->B
+    SimpleCopyTextureToTexture(thirdDevice, thirdDeviceQueue, wrappedTexADevice3,
+                               wrappedTexBDevice3);
+
+    ExternalImageExportInfoVkForTesting exportInfoTexBDevice3;
+    ASSERT_TRUE(mBackend->ExportImage(wrappedTexBDevice3, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
+                                      &exportInfoTexBDevice3));
+    IgnoreSignalSemaphore(wrappedTexADevice3);
+
+    // Import TexB, TexC on device 2
+    wgpu::Texture wrappedTexBDevice2 = WrapVulkanImage(
+        secondDevice, &defaultDescriptor, textureB.get(),
+        std::move(exportInfoTexBDevice3.semaphores), exportInfoTexBDevice3.releasedOldLayout,
+        exportInfoTexBDevice3.releasedNewLayout);
+
+    wgpu::Texture wrappedTexCDevice2 =
+        WrapVulkanImage(secondDevice, &defaultDescriptor, textureC.get(), {},
+                        VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);
+
+    // Copy B->C on device 2
+    SimpleCopyTextureToTexture(secondDevice, secondDeviceQueue, wrappedTexBDevice2,
+                               wrappedTexCDevice2);
+
+    ExternalImageExportInfoVkForTesting exportInfoTexCDevice2;
+    ASSERT_TRUE(mBackend->ExportImage(wrappedTexCDevice2, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
+                                      &exportInfoTexCDevice2));
+    IgnoreSignalSemaphore(wrappedTexBDevice2);
+
+    // Import TexC on device 1
+    wgpu::Texture wrappedTexCDevice1 = WrapVulkanImage(
+        device, &defaultDescriptor, textureC.get(), std::move(exportInfoTexCDevice2.semaphores),
+        exportInfoTexCDevice2.releasedOldLayout, exportInfoTexCDevice2.releasedNewLayout);
+
+    // Create TexD on device 1
+    wgpu::Texture texD = device.CreateTexture(&defaultDescriptor);
+
+    // Copy C->D on device 1
+    SimpleCopyTextureToTexture(device, queue, wrappedTexCDevice1, texD);
+
+    // Verify D matches clear color
+    EXPECT_PIXEL_RGBA8_EQ(RGBA8(1, 2, 3, 4), texD, 0, 0);
+
+    IgnoreSignalSemaphore(wrappedTexCDevice1);
+}
+
+// Tests a larger image is preserved when importing
+TEST_P(VulkanImageWrappingUsageTests, LargerImage) {
+    wgpu::TextureDescriptor descriptor;
+    descriptor.dimension = wgpu::TextureDimension::e2D;
+    descriptor.size.width = 640;
+    descriptor.size.height = 480;
+    descriptor.size.depthOrArrayLayers = 1;
+    descriptor.sampleCount = 1;
+    descriptor.format = wgpu::TextureFormat::RGBA8Unorm;
+    descriptor.mipLevelCount = 1;
+    descriptor.usage = wgpu::TextureUsage::CopyDst | wgpu::TextureUsage::CopySrc;
+
+    // Fill memory with textures
+    std::vector<wgpu::Texture> textures;
+    for (int i = 0; i < 20; i++) {
+        textures.push_back(device.CreateTexture(&descriptor));
+    }
+
+    wgpu::Queue secondDeviceQueue = secondDevice.GetQueue();
+
+    // Make an image on |secondDevice|
+    std::unique_ptr<ExternalTexture> texture = mBackend->CreateTexture(
+        descriptor.size.width, descriptor.size.height, descriptor.format, descriptor.usage);
+
+    // Import the image on |secondDevice|
+    wgpu::Texture wrappedTexture =
+        WrapVulkanImage(secondDevice, &descriptor, texture.get(), {}, VK_IMAGE_LAYOUT_UNDEFINED,
+                        VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);
+
+    // Draw a non-trivial picture
+    uint32_t width = 640, height = 480, pixelSize = 4;
+    uint32_t bytesPerRow = Align(width * pixelSize, kTextureBytesPerRowAlignment);
+    std::vector<unsigned char> data(bytesPerRow * (height - 1) + width * pixelSize);
+
+    for (uint32_t row = 0; row < height; row++) {
+        for (uint32_t col = 0; col < width; col++) {
+            float normRow = static_cast<float>(row) / height;
+            float normCol = static_cast<float>(col) / width;
+            float dist = sqrt(normRow * normRow + normCol * normCol) * 3;
+            dist = dist - static_cast<int>(dist);
+            data[4 * (row * width + col)] = static_cast<unsigned char>(dist * 255);
+            data[4 * (row * width + col) + 1] = static_cast<unsigned char>(dist * 255);
+            data[4 * (row * width + col) + 2] = static_cast<unsigned char>(dist * 255);
+            data[4 * (row * width + col) + 3] = 255;
+        }
+    }
+
+    // Write the picture
+    {
+        wgpu::Buffer copySrcBuffer = utils::CreateBufferFromData(
+            secondDevice, data.data(), data.size(), wgpu::BufferUsage::CopySrc);
+        wgpu::ImageCopyBuffer copySrc = utils::CreateImageCopyBuffer(copySrcBuffer, 0, bytesPerRow);
+        wgpu::ImageCopyTexture copyDst =
+            utils::CreateImageCopyTexture(wrappedTexture, 0, {0, 0, 0});
+        wgpu::Extent3D copySize = {width, height, 1};
+
         wgpu::CommandEncoder encoder = secondDevice.CreateCommandEncoder();
         encoder.CopyBufferToTexture(&copySrc, &copyDst, &copySize);
         wgpu::CommandBuffer commands = encoder.Finish();
         secondDeviceQueue.Submit(1, &commands);
+    }
+    ExternalImageExportInfoVkForTesting exportInfo;
+    ASSERT_TRUE(
+        mBackend->ExportImage(wrappedTexture, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, &exportInfo));
 
-        // Re-import back into |device|, waiting on |secondDevice|'s signal
-        ExternalImageExportInfoVkForTesting secondExportInfo;
-        ASSERT_TRUE(mBackend->ExportImage(secondDeviceWrappedTexture,
-                                          VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, &secondExportInfo));
+    // Import the image on |device|
+    wgpu::Texture nextWrappedTexture =
+        WrapVulkanImage(device, &descriptor, texture.get(), std::move(exportInfo.semaphores),
+                        exportInfo.releasedOldLayout, exportInfo.releasedNewLayout);
 
-        wgpu::Texture nextWrappedTexture =
-            WrapVulkanImage(device, &defaultDescriptor, defaultTexture.get(),
-                            std::move(secondExportInfo.semaphores),
-                            secondExportInfo.releasedOldLayout, secondExportInfo.releasedNewLayout);
+    // Copy the image into a buffer for comparison
+    wgpu::BufferDescriptor copyDesc;
+    copyDesc.size = data.size();
+    copyDesc.usage = wgpu::BufferUsage::CopySrc | wgpu::BufferUsage::CopyDst;
+    wgpu::Buffer copyDstBuffer = device.CreateBuffer(&copyDesc);
+    {
+        wgpu::ImageCopyTexture copySrc =
+            utils::CreateImageCopyTexture(nextWrappedTexture, 0, {0, 0, 0});
+        wgpu::ImageCopyBuffer copyDst = utils::CreateImageCopyBuffer(copyDstBuffer, 0, bytesPerRow);
 
-        // Verify |nextWrappedTexture| contains the color from our copy
-        EXPECT_PIXEL_RGBA8_EQ(RGBA8(1, 2, 3, 4), nextWrappedTexture, 0, 0);
+        wgpu::Extent3D copySize = {width, height, 1};
 
-        IgnoreSignalSemaphore(nextWrappedTexture);
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        encoder.CopyTextureToBuffer(&copySrc, &copyDst, &copySize);
+        wgpu::CommandBuffer commands = encoder.Finish();
+        queue.Submit(1, &commands);
     }
 
-    // Import a texture from |secondDevice|
-    // Clear the texture on |secondDevice|
-    // Issue a copy of the imported texture inside |device| to |copyDstTexture|
-    // Issue second copy to |secondCopyDstTexture|
-    // Verify the clear color from |secondDevice| is visible in both copies
-    TEST_P(VulkanImageWrappingUsageTests, DoubleTextureUsage) {
-        // Import the image on |secondDevice|
-        wgpu::Texture wrappedTexture =
-            WrapVulkanImage(secondDevice, &defaultDescriptor, defaultTexture.get(), {},
-                            VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL);
+    // Check the image is not corrupted on |device|
+    EXPECT_BUFFER_U32_RANGE_EQ(reinterpret_cast<uint32_t*>(data.data()), copyDstBuffer, 0,
+                               data.size() / 4);
 
-        // Clear |wrappedTexture| on |secondDevice|
-        ClearImage(secondDevice, wrappedTexture, {1 / 255.0f, 2 / 255.0f, 3 / 255.0f, 4 / 255.0f});
+    IgnoreSignalSemaphore(nextWrappedTexture);
+}
 
-        ExternalImageExportInfoVkForTesting exportInfo;
-        ASSERT_TRUE(mBackend->ExportImage(wrappedTexture, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
-                                          &exportInfo));
+// Test that texture descriptor view formats are passed to the backend for wrapped external
+// textures, and that contents may be reinterpreted as sRGB.
+TEST_P(VulkanImageWrappingUsageTests, SRGBReinterpretation) {
+    wgpu::TextureViewDescriptor viewDesc = {};
+    viewDesc.format = wgpu::TextureFormat::RGBA8UnormSrgb;
 
-        // Import the image to |device|, making sure we wait on |signalFd|
-        wgpu::Texture deviceWrappedTexture = WrapVulkanImage(
-            device, &defaultDescriptor, defaultTexture.get(), std::move(exportInfo.semaphores),
-            exportInfo.releasedOldLayout, exportInfo.releasedNewLayout);
+    wgpu::TextureDescriptor textureDesc = {};
+    textureDesc.size = {2, 2, 1};
+    textureDesc.format = wgpu::TextureFormat::RGBA8Unorm;
+    textureDesc.usage = wgpu::TextureUsage::CopyDst | wgpu::TextureUsage::TextureBinding;
+    textureDesc.viewFormatCount = 1;
+    textureDesc.viewFormats = &viewDesc.format;
 
-        // Create a second texture on |device|
-        wgpu::Texture copyDstTexture = device.CreateTexture(&defaultDescriptor);
+    std::unique_ptr<ExternalTexture> backendTexture = mBackend->CreateTexture(
+        textureDesc.size.width, textureDesc.size.height, textureDesc.format, textureDesc.usage);
 
-        // Create a third texture on |device|
-        wgpu::Texture secondCopyDstTexture = device.CreateTexture(&defaultDescriptor);
+    // Import the image on |device|
+    wgpu::Texture texture =
+        WrapVulkanImage(device, &textureDesc, backendTexture.get(), {}, VK_IMAGE_LAYOUT_UNDEFINED,
+                        VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);
+    ASSERT_NE(texture.Get(), nullptr);
 
-        // Copy |deviceWrappedTexture| into |copyDstTexture|
-        SimpleCopyTextureToTexture(device, queue, deviceWrappedTexture, copyDstTexture);
+    wgpu::ImageCopyTexture dst = {};
+    dst.texture = texture;
+    std::array<RGBA8, 4> rgbaTextureData = {
+        RGBA8(180, 0, 0, 255),
+        RGBA8(0, 84, 0, 127),
+        RGBA8(0, 0, 62, 100),
+        RGBA8(62, 180, 84, 90),
+    };
 
-        // Copy |deviceWrappedTexture| into |secondCopyDstTexture|
-        SimpleCopyTextureToTexture(device, queue, deviceWrappedTexture, secondCopyDstTexture);
+    wgpu::TextureDataLayout dataLayout = {};
+    dataLayout.bytesPerRow = textureDesc.size.width * sizeof(RGBA8);
 
-        // Verify |copyDstTexture| sees changes from |secondDevice|
-        EXPECT_PIXEL_RGBA8_EQ(RGBA8(1, 2, 3, 4), copyDstTexture, 0, 0);
+    queue.WriteTexture(&dst, rgbaTextureData.data(), rgbaTextureData.size() * sizeof(RGBA8),
+                       &dataLayout, &textureDesc.size);
 
-        // Verify |secondCopyDstTexture| sees changes from |secondDevice|
-        EXPECT_PIXEL_RGBA8_EQ(RGBA8(1, 2, 3, 4), secondCopyDstTexture, 0, 0);
+    wgpu::TextureView textureView = texture.CreateView(&viewDesc);
 
-        IgnoreSignalSemaphore(deviceWrappedTexture);
-    }
-
-    // Tex A on device 3 (external export)
-    // Tex B on device 2 (external export)
-    // Tex C on device 1 (external export)
-    // Clear color for A on device 3
-    // Copy A->B on device 3
-    // Copy B->C on device 2 (wait on B from previous op)
-    // Copy C->D on device 1 (wait on C from previous op)
-    // Verify D has same color as A
-    TEST_P(VulkanImageWrappingUsageTests, ChainTextureCopy) {
-        // device 1 = |device|
-        // device 2 = |secondDevice|
-        // Create device 3
-        dawn::native::vulkan::Device* thirdDeviceVk =
-            dawn::native::vulkan::ToBackend(backendAdapter->APICreateDevice(&deviceDescriptor));
-        wgpu::Device thirdDevice = wgpu::Device::Acquire(dawn::native::ToAPI(thirdDeviceVk));
-
-        // Make queue for device 2 and 3
-        wgpu::Queue secondDeviceQueue = secondDevice.GetQueue();
-        wgpu::Queue thirdDeviceQueue = thirdDevice.GetQueue();
-
-        // Create textures A, B, C
-        std::unique_ptr<ExternalTexture> textureA =
-            mBackend->CreateTexture(1, 1, wgpu::TextureFormat::RGBA8Unorm, defaultDescriptor.usage);
-        std::unique_ptr<ExternalTexture> textureB =
-            mBackend->CreateTexture(1, 1, wgpu::TextureFormat::RGBA8Unorm, defaultDescriptor.usage);
-        std::unique_ptr<ExternalTexture> textureC =
-            mBackend->CreateTexture(1, 1, wgpu::TextureFormat::RGBA8Unorm, defaultDescriptor.usage);
-
-        // Import TexA, TexB on device 3
-        wgpu::Texture wrappedTexADevice3 =
-            WrapVulkanImage(thirdDevice, &defaultDescriptor, textureA.get(), {},
-                            VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL);
-
-        wgpu::Texture wrappedTexBDevice3 =
-            WrapVulkanImage(thirdDevice, &defaultDescriptor, textureB.get(), {},
-                            VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);
-
-        // Clear TexA
-        ClearImage(thirdDevice, wrappedTexADevice3,
-                   {1 / 255.0f, 2 / 255.0f, 3 / 255.0f, 4 / 255.0f});
-
-        // Copy A->B
-        SimpleCopyTextureToTexture(thirdDevice, thirdDeviceQueue, wrappedTexADevice3,
-                                   wrappedTexBDevice3);
-
-        ExternalImageExportInfoVkForTesting exportInfoTexBDevice3;
-        ASSERT_TRUE(mBackend->ExportImage(wrappedTexBDevice3, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
-                                          &exportInfoTexBDevice3));
-        IgnoreSignalSemaphore(wrappedTexADevice3);
-
-        // Import TexB, TexC on device 2
-        wgpu::Texture wrappedTexBDevice2 = WrapVulkanImage(
-            secondDevice, &defaultDescriptor, textureB.get(),
-            std::move(exportInfoTexBDevice3.semaphores), exportInfoTexBDevice3.releasedOldLayout,
-            exportInfoTexBDevice3.releasedNewLayout);
-
-        wgpu::Texture wrappedTexCDevice2 =
-            WrapVulkanImage(secondDevice, &defaultDescriptor, textureC.get(), {},
-                            VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);
-
-        // Copy B->C on device 2
-        SimpleCopyTextureToTexture(secondDevice, secondDeviceQueue, wrappedTexBDevice2,
-                                   wrappedTexCDevice2);
-
-        ExternalImageExportInfoVkForTesting exportInfoTexCDevice2;
-        ASSERT_TRUE(mBackend->ExportImage(wrappedTexCDevice2, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
-                                          &exportInfoTexCDevice2));
-        IgnoreSignalSemaphore(wrappedTexBDevice2);
-
-        // Import TexC on device 1
-        wgpu::Texture wrappedTexCDevice1 = WrapVulkanImage(
-            device, &defaultDescriptor, textureC.get(), std::move(exportInfoTexCDevice2.semaphores),
-            exportInfoTexCDevice2.releasedOldLayout, exportInfoTexCDevice2.releasedNewLayout);
-
-        // Create TexD on device 1
-        wgpu::Texture texD = device.CreateTexture(&defaultDescriptor);
-
-        // Copy C->D on device 1
-        SimpleCopyTextureToTexture(device, queue, wrappedTexCDevice1, texD);
-
-        // Verify D matches clear color
-        EXPECT_PIXEL_RGBA8_EQ(RGBA8(1, 2, 3, 4), texD, 0, 0);
-
-        IgnoreSignalSemaphore(wrappedTexCDevice1);
-    }
-
-    // Tests a larger image is preserved when importing
-    TEST_P(VulkanImageWrappingUsageTests, LargerImage) {
-        wgpu::TextureDescriptor descriptor;
-        descriptor.dimension = wgpu::TextureDimension::e2D;
-        descriptor.size.width = 640;
-        descriptor.size.height = 480;
-        descriptor.size.depthOrArrayLayers = 1;
-        descriptor.sampleCount = 1;
-        descriptor.format = wgpu::TextureFormat::RGBA8Unorm;
-        descriptor.mipLevelCount = 1;
-        descriptor.usage = wgpu::TextureUsage::CopyDst | wgpu::TextureUsage::CopySrc;
-
-        // Fill memory with textures
-        std::vector<wgpu::Texture> textures;
-        for (int i = 0; i < 20; i++) {
-            textures.push_back(device.CreateTexture(&descriptor));
-        }
-
-        wgpu::Queue secondDeviceQueue = secondDevice.GetQueue();
-
-        // Make an image on |secondDevice|
-        std::unique_ptr<ExternalTexture> texture = mBackend->CreateTexture(
-            descriptor.size.width, descriptor.size.height, descriptor.format, descriptor.usage);
-
-        // Import the image on |secondDevice|
-        wgpu::Texture wrappedTexture =
-            WrapVulkanImage(secondDevice, &descriptor, texture.get(), {}, VK_IMAGE_LAYOUT_UNDEFINED,
-                            VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);
-
-        // Draw a non-trivial picture
-        uint32_t width = 640, height = 480, pixelSize = 4;
-        uint32_t bytesPerRow = Align(width * pixelSize, kTextureBytesPerRowAlignment);
-        std::vector<unsigned char> data(bytesPerRow * (height - 1) + width * pixelSize);
-
-        for (uint32_t row = 0; row < height; row++) {
-            for (uint32_t col = 0; col < width; col++) {
-                float normRow = static_cast<float>(row) / height;
-                float normCol = static_cast<float>(col) / width;
-                float dist = sqrt(normRow * normRow + normCol * normCol) * 3;
-                dist = dist - static_cast<int>(dist);
-                data[4 * (row * width + col)] = static_cast<unsigned char>(dist * 255);
-                data[4 * (row * width + col) + 1] = static_cast<unsigned char>(dist * 255);
-                data[4 * (row * width + col) + 2] = static_cast<unsigned char>(dist * 255);
-                data[4 * (row * width + col) + 3] = 255;
-            }
-        }
-
-        // Write the picture
-        {
-            wgpu::Buffer copySrcBuffer = utils::CreateBufferFromData(
-                secondDevice, data.data(), data.size(), wgpu::BufferUsage::CopySrc);
-            wgpu::ImageCopyBuffer copySrc =
-                utils::CreateImageCopyBuffer(copySrcBuffer, 0, bytesPerRow);
-            wgpu::ImageCopyTexture copyDst =
-                utils::CreateImageCopyTexture(wrappedTexture, 0, {0, 0, 0});
-            wgpu::Extent3D copySize = {width, height, 1};
-
-            wgpu::CommandEncoder encoder = secondDevice.CreateCommandEncoder();
-            encoder.CopyBufferToTexture(&copySrc, &copyDst, &copySize);
-            wgpu::CommandBuffer commands = encoder.Finish();
-            secondDeviceQueue.Submit(1, &commands);
-        }
-        ExternalImageExportInfoVkForTesting exportInfo;
-        ASSERT_TRUE(mBackend->ExportImage(wrappedTexture, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
-                                          &exportInfo));
-
-        // Import the image on |device|
-        wgpu::Texture nextWrappedTexture =
-            WrapVulkanImage(device, &descriptor, texture.get(), std::move(exportInfo.semaphores),
-                            exportInfo.releasedOldLayout, exportInfo.releasedNewLayout);
-
-        // Copy the image into a buffer for comparison
-        wgpu::BufferDescriptor copyDesc;
-        copyDesc.size = data.size();
-        copyDesc.usage = wgpu::BufferUsage::CopySrc | wgpu::BufferUsage::CopyDst;
-        wgpu::Buffer copyDstBuffer = device.CreateBuffer(&copyDesc);
-        {
-            wgpu::ImageCopyTexture copySrc =
-                utils::CreateImageCopyTexture(nextWrappedTexture, 0, {0, 0, 0});
-            wgpu::ImageCopyBuffer copyDst =
-                utils::CreateImageCopyBuffer(copyDstBuffer, 0, bytesPerRow);
-
-            wgpu::Extent3D copySize = {width, height, 1};
-
-            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-            encoder.CopyTextureToBuffer(&copySrc, &copyDst, &copySize);
-            wgpu::CommandBuffer commands = encoder.Finish();
-            queue.Submit(1, &commands);
-        }
-
-        // Check the image is not corrupted on |device|
-        EXPECT_BUFFER_U32_RANGE_EQ(reinterpret_cast<uint32_t*>(data.data()), copyDstBuffer, 0,
-                                   data.size() / 4);
-
-        IgnoreSignalSemaphore(nextWrappedTexture);
-    }
-
-    // Test that texture descriptor view formats are passed to the backend for wrapped external
-    // textures, and that contents may be reinterpreted as sRGB.
-    TEST_P(VulkanImageWrappingUsageTests, SRGBReinterpretation) {
-        wgpu::TextureViewDescriptor viewDesc = {};
-        viewDesc.format = wgpu::TextureFormat::RGBA8UnormSrgb;
-
-        wgpu::TextureDescriptor textureDesc = {};
-        textureDesc.size = {2, 2, 1};
-        textureDesc.format = wgpu::TextureFormat::RGBA8Unorm;
-        textureDesc.usage = wgpu::TextureUsage::CopyDst | wgpu::TextureUsage::TextureBinding;
-        textureDesc.viewFormatCount = 1;
-        textureDesc.viewFormats = &viewDesc.format;
-
-        std::unique_ptr<ExternalTexture> backendTexture = mBackend->CreateTexture(
-            textureDesc.size.width, textureDesc.size.height, textureDesc.format, textureDesc.usage);
-
-        // Import the image on |device|
-        wgpu::Texture texture =
-            WrapVulkanImage(device, &textureDesc, backendTexture.get(), {},
-                            VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);
-        ASSERT_NE(texture.Get(), nullptr);
-
-        wgpu::ImageCopyTexture dst = {};
-        dst.texture = texture;
-        std::array<RGBA8, 4> rgbaTextureData = {
-            RGBA8(180, 0, 0, 255),
-            RGBA8(0, 84, 0, 127),
-            RGBA8(0, 0, 62, 100),
-            RGBA8(62, 180, 84, 90),
-        };
-
-        wgpu::TextureDataLayout dataLayout = {};
-        dataLayout.bytesPerRow = textureDesc.size.width * sizeof(RGBA8);
-
-        queue.WriteTexture(&dst, rgbaTextureData.data(), rgbaTextureData.size() * sizeof(RGBA8),
-                           &dataLayout, &textureDesc.size);
-
-        wgpu::TextureView textureView = texture.CreateView(&viewDesc);
-
-        utils::ComboRenderPipelineDescriptor pipelineDesc;
-        pipelineDesc.vertex.module = utils::CreateShaderModule(device, R"(
+    utils::ComboRenderPipelineDescriptor pipelineDesc;
+    pipelineDesc.vertex.module = utils::CreateShaderModule(device, R"(
             @stage(vertex)
             fn main(@builtin(vertex_index) VertexIndex : u32) -> @builtin(position) vec4<f32> {
                 var pos = array<vec2<f32>, 6>(
@@ -835,7 +822,7 @@
                 return vec4<f32>(pos[VertexIndex], 0.0, 1.0);
             }
         )");
-        pipelineDesc.cFragment.module = utils::CreateShaderModule(device, R"(
+    pipelineDesc.cFragment.module = utils::CreateShaderModule(device, R"(
             @group(0) @binding(0) var texture : texture_2d<f32>;
 
             @stage(fragment)
@@ -844,45 +831,44 @@
             }
         )");
 
-        utils::BasicRenderPass renderPass =
-            utils::CreateBasicRenderPass(device, textureDesc.size.width, textureDesc.size.height,
-                                         wgpu::TextureFormat::RGBA8Unorm);
-        pipelineDesc.cTargets[0].format = renderPass.colorFormat;
+    utils::BasicRenderPass renderPass = utils::CreateBasicRenderPass(
+        device, textureDesc.size.width, textureDesc.size.height, wgpu::TextureFormat::RGBA8Unorm);
+    pipelineDesc.cTargets[0].format = renderPass.colorFormat;
 
-        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-        {
-            wgpu::RenderPipeline pipeline = device.CreateRenderPipeline(&pipelineDesc);
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+    {
+        wgpu::RenderPipeline pipeline = device.CreateRenderPipeline(&pipelineDesc);
 
-            wgpu::BindGroup bindGroup =
-                utils::MakeBindGroup(device, pipeline.GetBindGroupLayout(0), {{0, textureView}});
+        wgpu::BindGroup bindGroup =
+            utils::MakeBindGroup(device, pipeline.GetBindGroupLayout(0), {{0, textureView}});
 
-            wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass.renderPassInfo);
-            pass.SetPipeline(pipeline);
-            pass.SetBindGroup(0, bindGroup);
-            pass.Draw(6);
-            pass.End();
-        }
-
-        wgpu::CommandBuffer commands = encoder.Finish();
-        queue.Submit(1, &commands);
-
-        EXPECT_PIXEL_RGBA8_BETWEEN(  //
-            RGBA8(116, 0, 0, 255),   //
-            RGBA8(117, 0, 0, 255), renderPass.color, 0, 0);
-        EXPECT_PIXEL_RGBA8_BETWEEN(  //
-            RGBA8(0, 23, 0, 127),    //
-            RGBA8(0, 24, 0, 127), renderPass.color, 1, 0);
-        EXPECT_PIXEL_RGBA8_BETWEEN(  //
-            RGBA8(0, 0, 12, 100),    //
-            RGBA8(0, 0, 13, 100), renderPass.color, 0, 1);
-        EXPECT_PIXEL_RGBA8_BETWEEN(  //
-            RGBA8(12, 116, 23, 90),  //
-            RGBA8(13, 117, 24, 90), renderPass.color, 1, 1);
-
-        IgnoreSignalSemaphore(texture);
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass.renderPassInfo);
+        pass.SetPipeline(pipeline);
+        pass.SetBindGroup(0, bindGroup);
+        pass.Draw(6);
+        pass.End();
     }
 
-    DAWN_INSTANTIATE_TEST(VulkanImageWrappingValidationTests, VulkanBackend());
-    DAWN_INSTANTIATE_TEST(VulkanImageWrappingUsageTests, VulkanBackend());
+    wgpu::CommandBuffer commands = encoder.Finish();
+    queue.Submit(1, &commands);
+
+    EXPECT_PIXEL_RGBA8_BETWEEN(  //
+        RGBA8(116, 0, 0, 255),   //
+        RGBA8(117, 0, 0, 255), renderPass.color, 0, 0);
+    EXPECT_PIXEL_RGBA8_BETWEEN(  //
+        RGBA8(0, 23, 0, 127),    //
+        RGBA8(0, 24, 0, 127), renderPass.color, 1, 0);
+    EXPECT_PIXEL_RGBA8_BETWEEN(  //
+        RGBA8(0, 0, 12, 100),    //
+        RGBA8(0, 0, 13, 100), renderPass.color, 0, 1);
+    EXPECT_PIXEL_RGBA8_BETWEEN(  //
+        RGBA8(12, 116, 23, 90),  //
+        RGBA8(13, 117, 24, 90), renderPass.color, 1, 1);
+
+    IgnoreSignalSemaphore(texture);
+}
+
+DAWN_INSTANTIATE_TEST(VulkanImageWrappingValidationTests, VulkanBackend());
+DAWN_INSTANTIATE_TEST(VulkanImageWrappingUsageTests, VulkanBackend());
 
 }  // namespace dawn::native::vulkan
diff --git a/src/dawn/tests/white_box/VulkanImageWrappingTests.h b/src/dawn/tests/white_box/VulkanImageWrappingTests.h
index 4cc34c6..dd7152b 100644
--- a/src/dawn/tests/white_box/VulkanImageWrappingTests.h
+++ b/src/dawn/tests/white_box/VulkanImageWrappingTests.h
@@ -28,48 +28,47 @@
 
 namespace dawn::native::vulkan {
 
-    struct ExternalImageDescriptorVkForTesting;
-    struct ExternalImageExportInfoVkForTesting;
+struct ExternalImageDescriptorVkForTesting;
+struct ExternalImageExportInfoVkForTesting;
 
-    class VulkanImageWrappingTestBackend {
+class VulkanImageWrappingTestBackend {
+  public:
+    static std::unique_ptr<VulkanImageWrappingTestBackend> Create(const wgpu::Device& device);
+    virtual ~VulkanImageWrappingTestBackend() = default;
+
+    class ExternalTexture : NonCopyable {
       public:
-        static std::unique_ptr<VulkanImageWrappingTestBackend> Create(const wgpu::Device& device);
-        virtual ~VulkanImageWrappingTestBackend() = default;
-
-        class ExternalTexture : NonCopyable {
-          public:
-            virtual ~ExternalTexture() = default;
-        };
-        class ExternalSemaphore : NonCopyable {
-          public:
-            virtual ~ExternalSemaphore() = default;
-        };
-
-        virtual std::unique_ptr<ExternalTexture> CreateTexture(uint32_t width,
-                                                               uint32_t height,
-                                                               wgpu::TextureFormat format,
-                                                               wgpu::TextureUsage usage) = 0;
-        virtual wgpu::Texture WrapImage(
-            const wgpu::Device& device,
-            const ExternalTexture* texture,
-            const ExternalImageDescriptorVkForTesting& descriptor,
-            std::vector<std::unique_ptr<ExternalSemaphore>> semaphores) = 0;
-
-        virtual bool ExportImage(const wgpu::Texture& texture,
-                                 VkImageLayout layout,
-                                 ExternalImageExportInfoVkForTesting* exportInfo) = 0;
+        virtual ~ExternalTexture() = default;
+    };
+    class ExternalSemaphore : NonCopyable {
+      public:
+        virtual ~ExternalSemaphore() = default;
     };
 
-    struct ExternalImageDescriptorVkForTesting : public ExternalImageDescriptorVk {
-      public:
-        ExternalImageDescriptorVkForTesting();
-    };
+    virtual std::unique_ptr<ExternalTexture> CreateTexture(uint32_t width,
+                                                           uint32_t height,
+                                                           wgpu::TextureFormat format,
+                                                           wgpu::TextureUsage usage) = 0;
+    virtual wgpu::Texture WrapImage(const wgpu::Device& device,
+                                    const ExternalTexture* texture,
+                                    const ExternalImageDescriptorVkForTesting& descriptor,
+                                    std::vector<std::unique_ptr<ExternalSemaphore>> semaphores) = 0;
 
-    struct ExternalImageExportInfoVkForTesting : public ExternalImageExportInfoVk {
-      public:
-        ExternalImageExportInfoVkForTesting();
-        std::vector<std::unique_ptr<VulkanImageWrappingTestBackend::ExternalSemaphore>> semaphores;
-    };
+    virtual bool ExportImage(const wgpu::Texture& texture,
+                             VkImageLayout layout,
+                             ExternalImageExportInfoVkForTesting* exportInfo) = 0;
+};
+
+struct ExternalImageDescriptorVkForTesting : public ExternalImageDescriptorVk {
+  public:
+    ExternalImageDescriptorVkForTesting();
+};
+
+struct ExternalImageExportInfoVkForTesting : public ExternalImageExportInfoVk {
+  public:
+    ExternalImageExportInfoVkForTesting();
+    std::vector<std::unique_ptr<VulkanImageWrappingTestBackend::ExternalSemaphore>> semaphores;
+};
 
 }  // namespace dawn::native::vulkan
 
diff --git a/src/dawn/tests/white_box/VulkanImageWrappingTests_DmaBuf.cpp b/src/dawn/tests/white_box/VulkanImageWrappingTests_DmaBuf.cpp
index 9ef7c3a..cccaadc 100644
--- a/src/dawn/tests/white_box/VulkanImageWrappingTests_DmaBuf.cpp
+++ b/src/dawn/tests/white_box/VulkanImageWrappingTests_DmaBuf.cpp
@@ -26,169 +26,161 @@
 
 namespace dawn::native::vulkan {
 
-    ExternalImageDescriptorVkForTesting::ExternalImageDescriptorVkForTesting()
-        : ExternalImageDescriptorVk(ExternalImageType::DmaBuf) {
+ExternalImageDescriptorVkForTesting::ExternalImageDescriptorVkForTesting()
+    : ExternalImageDescriptorVk(ExternalImageType::DmaBuf) {}
+ExternalImageExportInfoVkForTesting::ExternalImageExportInfoVkForTesting()
+    : ExternalImageExportInfoVk(ExternalImageType::DmaBuf) {}
+
+class ExternalSemaphoreDmaBuf : public VulkanImageWrappingTestBackend::ExternalSemaphore {
+  public:
+    explicit ExternalSemaphoreDmaBuf(int handle) : mHandle(handle) {}
+    ~ExternalSemaphoreDmaBuf() override {
+        if (mHandle != -1) {
+            close(mHandle);
+        }
     }
-    ExternalImageExportInfoVkForTesting::ExternalImageExportInfoVkForTesting()
-        : ExternalImageExportInfoVk(ExternalImageType::DmaBuf) {
+    int AcquireHandle() {
+        int handle = mHandle;
+        mHandle = -1;
+        return handle;
     }
 
-    class ExternalSemaphoreDmaBuf : public VulkanImageWrappingTestBackend::ExternalSemaphore {
-      public:
-        explicit ExternalSemaphoreDmaBuf(int handle) : mHandle(handle) {
+  private:
+    int mHandle = -1;
+};
+
+class ExternalTextureDmaBuf : public VulkanImageWrappingTestBackend::ExternalTexture {
+  public:
+    ExternalTextureDmaBuf(gbm_bo* bo, int fd, uint32_t stride, uint64_t drmModifier)
+        : mGbmBo(bo), mFd(fd), stride(stride), drmModifier(drmModifier) {}
+
+    ~ExternalTextureDmaBuf() override {
+        if (mFd != -1) {
+            close(mFd);
         }
-        ~ExternalSemaphoreDmaBuf() override {
-            if (mHandle != -1) {
-                close(mHandle);
-            }
+        if (mGbmBo != nullptr) {
+            gbm_bo_destroy(mGbmBo);
         }
-        int AcquireHandle() {
-            int handle = mHandle;
-            mHandle = -1;
-            return handle;
-        }
-
-      private:
-        int mHandle = -1;
-    };
-
-    class ExternalTextureDmaBuf : public VulkanImageWrappingTestBackend::ExternalTexture {
-      public:
-        ExternalTextureDmaBuf(gbm_bo* bo, int fd, uint32_t stride, uint64_t drmModifier)
-            : mGbmBo(bo), mFd(fd), stride(stride), drmModifier(drmModifier) {
-        }
-
-        ~ExternalTextureDmaBuf() override {
-            if (mFd != -1) {
-                close(mFd);
-            }
-            if (mGbmBo != nullptr) {
-                gbm_bo_destroy(mGbmBo);
-            }
-        }
-
-        int Dup() const {
-            return dup(mFd);
-        }
-
-      private:
-        gbm_bo* mGbmBo = nullptr;
-        int mFd = -1;
-
-      public:
-        const uint32_t stride;
-        const uint64_t drmModifier;
-    };
-
-    class VulkanImageWrappingTestBackendDmaBuf : public VulkanImageWrappingTestBackend {
-      public:
-        explicit VulkanImageWrappingTestBackendDmaBuf(const wgpu::Device& device) {
-        }
-
-        ~VulkanImageWrappingTestBackendDmaBuf() {
-            if (mGbmDevice != nullptr) {
-                gbm_device_destroy(mGbmDevice);
-                mGbmDevice = nullptr;
-            }
-        }
-
-        std::unique_ptr<ExternalTexture> CreateTexture(uint32_t width,
-                                                       uint32_t height,
-                                                       wgpu::TextureFormat format,
-                                                       wgpu::TextureUsage usage) override {
-            EXPECT_EQ(format, wgpu::TextureFormat::RGBA8Unorm);
-
-            gbm_bo* bo = CreateGbmBo(width, height, true);
-
-            return std::make_unique<ExternalTextureDmaBuf>(
-                bo, gbm_bo_get_fd(bo), gbm_bo_get_stride_for_plane(bo, 0), gbm_bo_get_modifier(bo));
-        }
-
-        wgpu::Texture WrapImage(
-            const wgpu::Device& device,
-            const ExternalTexture* texture,
-            const ExternalImageDescriptorVkForTesting& descriptor,
-            std::vector<std::unique_ptr<ExternalSemaphore>> semaphores) override {
-            const ExternalTextureDmaBuf* textureDmaBuf =
-                static_cast<const ExternalTextureDmaBuf*>(texture);
-            std::vector<int> waitFDs;
-            for (auto& semaphore : semaphores) {
-                waitFDs.push_back(
-                    static_cast<ExternalSemaphoreDmaBuf*>(semaphore.get())->AcquireHandle());
-            }
-
-            ExternalImageDescriptorDmaBuf descriptorDmaBuf;
-            *static_cast<ExternalImageDescriptorVk*>(&descriptorDmaBuf) = descriptor;
-
-            descriptorDmaBuf.memoryFD = textureDmaBuf->Dup();
-            descriptorDmaBuf.waitFDs = std::move(waitFDs);
-
-            descriptorDmaBuf.stride = textureDmaBuf->stride;
-            descriptorDmaBuf.drmModifier = textureDmaBuf->drmModifier;
-
-            return dawn::native::vulkan::WrapVulkanImage(device.Get(), &descriptorDmaBuf);
-        }
-
-        bool ExportImage(const wgpu::Texture& texture,
-                         VkImageLayout layout,
-                         ExternalImageExportInfoVkForTesting* exportInfo) override {
-            ExternalImageExportInfoDmaBuf infoDmaBuf;
-            bool success = ExportVulkanImage(texture.Get(), layout, &infoDmaBuf);
-
-            *static_cast<ExternalImageExportInfoVk*>(exportInfo) = infoDmaBuf;
-            for (int fd : infoDmaBuf.semaphoreHandles) {
-                EXPECT_NE(fd, -1);
-                exportInfo->semaphores.push_back(std::make_unique<ExternalSemaphoreDmaBuf>(fd));
-            }
-
-            return success;
-        }
-
-        void CreateGbmDevice() {
-            // Render nodes [1] are the primary interface for communicating with the GPU on
-            // devices that support DRM. The actual filename of the render node is
-            // implementation-specific, so we must scan through all possible filenames to find
-            // one that we can use [2].
-            //
-            // [1] https://dri.freedesktop.org/docs/drm/gpu/drm-uapi.html#render-nodes
-            // [2]
-            // https://cs.chromium.org/chromium/src/ui/ozone/platform/wayland/gpu/drm_render_node_path_finder.cc
-            const uint32_t kRenderNodeStart = 128;
-            const uint32_t kRenderNodeEnd = kRenderNodeStart + 16;
-            const std::string kRenderNodeTemplate = "/dev/dri/renderD";
-
-            int renderNodeFd = -1;
-            for (uint32_t i = kRenderNodeStart; i < kRenderNodeEnd; i++) {
-                std::string renderNode = kRenderNodeTemplate + std::to_string(i);
-                renderNodeFd = open(renderNode.c_str(), O_RDWR);
-                if (renderNodeFd >= 0)
-                    break;
-            }
-            EXPECT_GE(renderNodeFd, 0) << "Failed to get file descriptor for render node";
-
-            gbm_device* gbmDevice = gbm_create_device(renderNodeFd);
-            EXPECT_NE(gbmDevice, nullptr) << "Failed to create GBM device";
-            mGbmDevice = gbmDevice;
-        }
-
-      private:
-        gbm_bo* CreateGbmBo(uint32_t width, uint32_t height, bool linear) {
-            uint32_t flags = GBM_BO_USE_RENDERING;
-            if (linear)
-                flags |= GBM_BO_USE_LINEAR;
-            gbm_bo* gbmBo = gbm_bo_create(mGbmDevice, width, height, GBM_FORMAT_XBGR8888, flags);
-            EXPECT_NE(gbmBo, nullptr) << "Failed to create GBM buffer object";
-            return gbmBo;
-        }
-
-        gbm_device* mGbmDevice = nullptr;
-    };
-
-    // static
-    std::unique_ptr<VulkanImageWrappingTestBackend> VulkanImageWrappingTestBackend::Create(
-        const wgpu::Device& device) {
-        auto backend = std::make_unique<VulkanImageWrappingTestBackendDmaBuf>(device);
-        backend->CreateGbmDevice();
-        return backend;
     }
+
+    int Dup() const { return dup(mFd); }
+
+  private:
+    gbm_bo* mGbmBo = nullptr;
+    int mFd = -1;
+
+  public:
+    const uint32_t stride;
+    const uint64_t drmModifier;
+};
+
+class VulkanImageWrappingTestBackendDmaBuf : public VulkanImageWrappingTestBackend {
+  public:
+    explicit VulkanImageWrappingTestBackendDmaBuf(const wgpu::Device& device) {}
+
+    ~VulkanImageWrappingTestBackendDmaBuf() {
+        if (mGbmDevice != nullptr) {
+            gbm_device_destroy(mGbmDevice);
+            mGbmDevice = nullptr;
+        }
+    }
+
+    std::unique_ptr<ExternalTexture> CreateTexture(uint32_t width,
+                                                   uint32_t height,
+                                                   wgpu::TextureFormat format,
+                                                   wgpu::TextureUsage usage) override {
+        EXPECT_EQ(format, wgpu::TextureFormat::RGBA8Unorm);
+
+        gbm_bo* bo = CreateGbmBo(width, height, true);
+
+        return std::make_unique<ExternalTextureDmaBuf>(
+            bo, gbm_bo_get_fd(bo), gbm_bo_get_stride_for_plane(bo, 0), gbm_bo_get_modifier(bo));
+    }
+
+    wgpu::Texture WrapImage(const wgpu::Device& device,
+                            const ExternalTexture* texture,
+                            const ExternalImageDescriptorVkForTesting& descriptor,
+                            std::vector<std::unique_ptr<ExternalSemaphore>> semaphores) override {
+        const ExternalTextureDmaBuf* textureDmaBuf =
+            static_cast<const ExternalTextureDmaBuf*>(texture);
+        std::vector<int> waitFDs;
+        for (auto& semaphore : semaphores) {
+            waitFDs.push_back(
+                static_cast<ExternalSemaphoreDmaBuf*>(semaphore.get())->AcquireHandle());
+        }
+
+        ExternalImageDescriptorDmaBuf descriptorDmaBuf;
+        *static_cast<ExternalImageDescriptorVk*>(&descriptorDmaBuf) = descriptor;
+
+        descriptorDmaBuf.memoryFD = textureDmaBuf->Dup();
+        descriptorDmaBuf.waitFDs = std::move(waitFDs);
+
+        descriptorDmaBuf.stride = textureDmaBuf->stride;
+        descriptorDmaBuf.drmModifier = textureDmaBuf->drmModifier;
+
+        return dawn::native::vulkan::WrapVulkanImage(device.Get(), &descriptorDmaBuf);
+    }
+
+    bool ExportImage(const wgpu::Texture& texture,
+                     VkImageLayout layout,
+                     ExternalImageExportInfoVkForTesting* exportInfo) override {
+        ExternalImageExportInfoDmaBuf infoDmaBuf;
+        bool success = ExportVulkanImage(texture.Get(), layout, &infoDmaBuf);
+
+        *static_cast<ExternalImageExportInfoVk*>(exportInfo) = infoDmaBuf;
+        for (int fd : infoDmaBuf.semaphoreHandles) {
+            EXPECT_NE(fd, -1);
+            exportInfo->semaphores.push_back(std::make_unique<ExternalSemaphoreDmaBuf>(fd));
+        }
+
+        return success;
+    }
+
+    void CreateGbmDevice() {
+        // Render nodes [1] are the primary interface for communicating with the GPU on
+        // devices that support DRM. The actual filename of the render node is
+        // implementation-specific, so we must scan through all possible filenames to find
+        // one that we can use [2].
+        //
+        // [1] https://dri.freedesktop.org/docs/drm/gpu/drm-uapi.html#render-nodes
+        // [2]
+        // https://cs.chromium.org/chromium/src/ui/ozone/platform/wayland/gpu/drm_render_node_path_finder.cc
+        const uint32_t kRenderNodeStart = 128;
+        const uint32_t kRenderNodeEnd = kRenderNodeStart + 16;
+        const std::string kRenderNodeTemplate = "/dev/dri/renderD";
+
+        int renderNodeFd = -1;
+        for (uint32_t i = kRenderNodeStart; i < kRenderNodeEnd; i++) {
+            std::string renderNode = kRenderNodeTemplate + std::to_string(i);
+            renderNodeFd = open(renderNode.c_str(), O_RDWR);
+            if (renderNodeFd >= 0)
+                break;
+        }
+        EXPECT_GE(renderNodeFd, 0) << "Failed to get file descriptor for render node";
+
+        gbm_device* gbmDevice = gbm_create_device(renderNodeFd);
+        EXPECT_NE(gbmDevice, nullptr) << "Failed to create GBM device";
+        mGbmDevice = gbmDevice;
+    }
+
+  private:
+    gbm_bo* CreateGbmBo(uint32_t width, uint32_t height, bool linear) {
+        uint32_t flags = GBM_BO_USE_RENDERING;
+        if (linear)
+            flags |= GBM_BO_USE_LINEAR;
+        gbm_bo* gbmBo = gbm_bo_create(mGbmDevice, width, height, GBM_FORMAT_XBGR8888, flags);
+        EXPECT_NE(gbmBo, nullptr) << "Failed to create GBM buffer object";
+        return gbmBo;
+    }
+
+    gbm_device* mGbmDevice = nullptr;
+};
+
+// static
+std::unique_ptr<VulkanImageWrappingTestBackend> VulkanImageWrappingTestBackend::Create(
+    const wgpu::Device& device) {
+    auto backend = std::make_unique<VulkanImageWrappingTestBackendDmaBuf>(device);
+    backend->CreateGbmDevice();
+    return backend;
+}
 }  // namespace dawn::native::vulkan
diff --git a/src/dawn/tests/white_box/VulkanImageWrappingTests_OpaqueFD.cpp b/src/dawn/tests/white_box/VulkanImageWrappingTests_OpaqueFD.cpp
index 9393c06..c92181c 100644
--- a/src/dawn/tests/white_box/VulkanImageWrappingTests_OpaqueFD.cpp
+++ b/src/dawn/tests/white_box/VulkanImageWrappingTests_OpaqueFD.cpp
@@ -26,256 +26,246 @@
 
 namespace dawn::native::vulkan {
 
-    ExternalImageDescriptorVkForTesting::ExternalImageDescriptorVkForTesting()
-        : ExternalImageDescriptorVk(ExternalImageType::OpaqueFD) {
+ExternalImageDescriptorVkForTesting::ExternalImageDescriptorVkForTesting()
+    : ExternalImageDescriptorVk(ExternalImageType::OpaqueFD) {}
+ExternalImageExportInfoVkForTesting::ExternalImageExportInfoVkForTesting()
+    : ExternalImageExportInfoVk(ExternalImageType::OpaqueFD) {}
+
+class ExternalSemaphoreOpaqueFD : public VulkanImageWrappingTestBackend::ExternalSemaphore {
+  public:
+    explicit ExternalSemaphoreOpaqueFD(int handle) : mHandle(handle) {}
+    ~ExternalSemaphoreOpaqueFD() override {
+        if (mHandle != -1) {
+            close(mHandle);
+        }
     }
-    ExternalImageExportInfoVkForTesting::ExternalImageExportInfoVkForTesting()
-        : ExternalImageExportInfoVk(ExternalImageType::OpaqueFD) {
+    int AcquireHandle() {
+        int handle = mHandle;
+        mHandle = -1;
+        return handle;
     }
 
-    class ExternalSemaphoreOpaqueFD : public VulkanImageWrappingTestBackend::ExternalSemaphore {
-      public:
-        explicit ExternalSemaphoreOpaqueFD(int handle) : mHandle(handle) {
+  private:
+    int mHandle = -1;
+};
+
+class ExternalTextureOpaqueFD : public VulkanImageWrappingTestBackend::ExternalTexture {
+  public:
+    ExternalTextureOpaqueFD(dawn::native::vulkan::Device* device,
+                            int fd,
+                            VkDeviceMemory allocation,
+                            VkImage handle,
+                            VkDeviceSize allocationSize,
+                            uint32_t memoryTypeIndex)
+        : mDevice(device),
+          mFd(fd),
+          mAllocation(allocation),
+          mHandle(handle),
+          allocationSize(allocationSize),
+          memoryTypeIndex(memoryTypeIndex) {}
+
+    ~ExternalTextureOpaqueFD() override {
+        if (mFd != -1) {
+            close(mFd);
         }
-        ~ExternalSemaphoreOpaqueFD() override {
-            if (mHandle != -1) {
-                close(mHandle);
-            }
+        if (mAllocation != VK_NULL_HANDLE) {
+            mDevice->GetFencedDeleter()->DeleteWhenUnused(mAllocation);
         }
-        int AcquireHandle() {
-            int handle = mHandle;
-            mHandle = -1;
-            return handle;
+        if (mHandle != VK_NULL_HANDLE) {
+            mDevice->GetFencedDeleter()->DeleteWhenUnused(mHandle);
+        }
+    }
+
+    int Dup() const { return dup(mFd); }
+
+  private:
+    dawn::native::vulkan::Device* mDevice;
+    int mFd = -1;
+    VkDeviceMemory mAllocation = VK_NULL_HANDLE;
+    VkImage mHandle = VK_NULL_HANDLE;
+
+  public:
+    const VkDeviceSize allocationSize;
+    const uint32_t memoryTypeIndex;
+};
+
+class VulkanImageWrappingTestBackendOpaqueFD : public VulkanImageWrappingTestBackend {
+  public:
+    explicit VulkanImageWrappingTestBackendOpaqueFD(const wgpu::Device& device) : mDevice(device) {
+        mDeviceVk = dawn::native::vulkan::ToBackend(dawn::native::FromAPI(device.Get()));
+    }
+
+    std::unique_ptr<ExternalTexture> CreateTexture(uint32_t width,
+                                                   uint32_t height,
+                                                   wgpu::TextureFormat format,
+                                                   wgpu::TextureUsage usage) override {
+        EXPECT_EQ(format, wgpu::TextureFormat::RGBA8Unorm);
+        VkFormat vulkanFormat = VK_FORMAT_R8G8B8A8_UNORM;
+
+        VkImage handle;
+        ::VkResult result = CreateImage(mDeviceVk, width, height, vulkanFormat, &handle);
+        EXPECT_EQ(result, VK_SUCCESS) << "Failed to create external image";
+
+        VkDeviceMemory allocation;
+        VkDeviceSize allocationSize;
+        uint32_t memoryTypeIndex;
+        ::VkResult resultBool =
+            AllocateMemory(mDeviceVk, handle, &allocation, &allocationSize, &memoryTypeIndex);
+        EXPECT_EQ(resultBool, VK_SUCCESS) << "Failed to allocate external memory";
+
+        result = BindMemory(mDeviceVk, handle, allocation);
+        EXPECT_EQ(result, VK_SUCCESS) << "Failed to bind image memory";
+
+        int fd = GetMemoryFd(mDeviceVk, allocation);
+
+        return std::make_unique<ExternalTextureOpaqueFD>(mDeviceVk, fd, allocation, handle,
+                                                         allocationSize, memoryTypeIndex);
+    }
+
+    wgpu::Texture WrapImage(const wgpu::Device& device,
+                            const ExternalTexture* texture,
+                            const ExternalImageDescriptorVkForTesting& descriptor,
+                            std::vector<std::unique_ptr<ExternalSemaphore>> semaphores) override {
+        const ExternalTextureOpaqueFD* textureOpaqueFD =
+            static_cast<const ExternalTextureOpaqueFD*>(texture);
+        std::vector<int> waitFDs;
+        for (auto& semaphore : semaphores) {
+            waitFDs.push_back(
+                static_cast<ExternalSemaphoreOpaqueFD*>(semaphore.get())->AcquireHandle());
         }
 
-      private:
-        int mHandle = -1;
-    };
+        ExternalImageDescriptorOpaqueFD descriptorOpaqueFD;
+        *static_cast<ExternalImageDescriptorVk*>(&descriptorOpaqueFD) = descriptor;
+        descriptorOpaqueFD.memoryFD = textureOpaqueFD->Dup();
+        descriptorOpaqueFD.allocationSize = textureOpaqueFD->allocationSize;
+        descriptorOpaqueFD.memoryTypeIndex = textureOpaqueFD->memoryTypeIndex;
+        descriptorOpaqueFD.waitFDs = std::move(waitFDs);
 
-    class ExternalTextureOpaqueFD : public VulkanImageWrappingTestBackend::ExternalTexture {
-      public:
-        ExternalTextureOpaqueFD(dawn::native::vulkan::Device* device,
-                                int fd,
-                                VkDeviceMemory allocation,
-                                VkImage handle,
-                                VkDeviceSize allocationSize,
-                                uint32_t memoryTypeIndex)
-            : mDevice(device),
-              mFd(fd),
-              mAllocation(allocation),
-              mHandle(handle),
-              allocationSize(allocationSize),
-              memoryTypeIndex(memoryTypeIndex) {
+        return dawn::native::vulkan::WrapVulkanImage(device.Get(), &descriptorOpaqueFD);
+    }
+
+    bool ExportImage(const wgpu::Texture& texture,
+                     VkImageLayout layout,
+                     ExternalImageExportInfoVkForTesting* exportInfo) override {
+        ExternalImageExportInfoOpaqueFD infoOpaqueFD;
+        bool success = ExportVulkanImage(texture.Get(), layout, &infoOpaqueFD);
+
+        *static_cast<ExternalImageExportInfoVk*>(exportInfo) = infoOpaqueFD;
+        for (int fd : infoOpaqueFD.semaphoreHandles) {
+            EXPECT_NE(fd, -1);
+            exportInfo->semaphores.push_back(std::make_unique<ExternalSemaphoreOpaqueFD>(fd));
         }
 
-        ~ExternalTextureOpaqueFD() override {
-            if (mFd != -1) {
-                close(mFd);
-            }
-            if (mAllocation != VK_NULL_HANDLE) {
-                mDevice->GetFencedDeleter()->DeleteWhenUnused(mAllocation);
-            }
-            if (mHandle != VK_NULL_HANDLE) {
-                mDevice->GetFencedDeleter()->DeleteWhenUnused(mHandle);
-            }
-        }
+        return success;
+    }
 
-        int Dup() const {
-            return dup(mFd);
-        }
+  private:
+    // Creates a VkImage with external memory
+    ::VkResult CreateImage(dawn::native::vulkan::Device* deviceVk,
+                           uint32_t width,
+                           uint32_t height,
+                           VkFormat format,
+                           VkImage* image) {
+        VkExternalMemoryImageCreateInfoKHR externalInfo;
+        externalInfo.sType = VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO_KHR;
+        externalInfo.pNext = nullptr;
+        externalInfo.handleTypes = VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT_KHR;
 
-      private:
-        dawn::native::vulkan::Device* mDevice;
-        int mFd = -1;
-        VkDeviceMemory mAllocation = VK_NULL_HANDLE;
-        VkImage mHandle = VK_NULL_HANDLE;
+        auto usage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT |
+                     VK_IMAGE_USAGE_TRANSFER_DST_BIT;
 
-      public:
-        const VkDeviceSize allocationSize;
-        const uint32_t memoryTypeIndex;
-    };
+        VkImageCreateInfo createInfo;
+        createInfo.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
+        createInfo.pNext = &externalInfo;
+        createInfo.flags = VK_IMAGE_CREATE_ALIAS_BIT_KHR;
+        createInfo.imageType = VK_IMAGE_TYPE_2D;
+        createInfo.format = format;
+        createInfo.extent = {width, height, 1};
+        createInfo.mipLevels = 1;
+        createInfo.arrayLayers = 1;
+        createInfo.samples = VK_SAMPLE_COUNT_1_BIT;
+        createInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
+        createInfo.usage = usage;
+        createInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
+        createInfo.queueFamilyIndexCount = 0;
+        createInfo.pQueueFamilyIndices = nullptr;
+        createInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
 
-    class VulkanImageWrappingTestBackendOpaqueFD : public VulkanImageWrappingTestBackend {
-      public:
-        explicit VulkanImageWrappingTestBackendOpaqueFD(const wgpu::Device& device)
-            : mDevice(device) {
-            mDeviceVk = dawn::native::vulkan::ToBackend(dawn::native::FromAPI(device.Get()));
-        }
+        return deviceVk->fn.CreateImage(deviceVk->GetVkDevice(), &createInfo, nullptr, &**image);
+    }
 
-        std::unique_ptr<ExternalTexture> CreateTexture(uint32_t width,
-                                                       uint32_t height,
-                                                       wgpu::TextureFormat format,
-                                                       wgpu::TextureUsage usage) override {
-            EXPECT_EQ(format, wgpu::TextureFormat::RGBA8Unorm);
-            VkFormat vulkanFormat = VK_FORMAT_R8G8B8A8_UNORM;
+    // Allocates memory for an image
+    ::VkResult AllocateMemory(dawn::native::vulkan::Device* deviceVk,
+                              VkImage handle,
+                              VkDeviceMemory* allocation,
+                              VkDeviceSize* allocationSize,
+                              uint32_t* memoryTypeIndex) {
+        // Create the image memory and associate it with the container
+        VkMemoryRequirements requirements;
+        deviceVk->fn.GetImageMemoryRequirements(deviceVk->GetVkDevice(), handle, &requirements);
 
-            VkImage handle;
-            ::VkResult result = CreateImage(mDeviceVk, width, height, vulkanFormat, &handle);
-            EXPECT_EQ(result, VK_SUCCESS) << "Failed to create external image";
+        // Import memory from file descriptor
+        VkExportMemoryAllocateInfoKHR externalInfo;
+        externalInfo.sType = VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO_KHR;
+        externalInfo.pNext = nullptr;
+        externalInfo.handleTypes = VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT_KHR;
 
-            VkDeviceMemory allocation;
-            VkDeviceSize allocationSize;
-            uint32_t memoryTypeIndex;
-            ::VkResult resultBool =
-                AllocateMemory(mDeviceVk, handle, &allocation, &allocationSize, &memoryTypeIndex);
-            EXPECT_EQ(resultBool, VK_SUCCESS) << "Failed to allocate external memory";
+        int bestType = deviceVk->GetResourceMemoryAllocator()->FindBestTypeIndex(
+            requirements, MemoryKind::Opaque);
+        VkMemoryAllocateInfo allocateInfo;
+        allocateInfo.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
+        allocateInfo.pNext = &externalInfo;
+        allocateInfo.allocationSize = requirements.size;
+        allocateInfo.memoryTypeIndex = static_cast<uint32_t>(bestType);
 
-            result = BindMemory(mDeviceVk, handle, allocation);
-            EXPECT_EQ(result, VK_SUCCESS) << "Failed to bind image memory";
+        *allocationSize = allocateInfo.allocationSize;
+        *memoryTypeIndex = allocateInfo.memoryTypeIndex;
 
-            int fd = GetMemoryFd(mDeviceVk, allocation);
+        return deviceVk->fn.AllocateMemory(deviceVk->GetVkDevice(), &allocateInfo, nullptr,
+                                           &**allocation);
+    }
 
-            return std::make_unique<ExternalTextureOpaqueFD>(mDeviceVk, fd, allocation, handle,
-                                                             allocationSize, memoryTypeIndex);
-        }
+    // Binds memory to an image
+    ::VkResult BindMemory(dawn::native::vulkan::Device* deviceVk,
+                          VkImage handle,
+                          VkDeviceMemory memory) {
+        return deviceVk->fn.BindImageMemory(deviceVk->GetVkDevice(), handle, memory, 0);
+    }
 
-        wgpu::Texture WrapImage(
-            const wgpu::Device& device,
-            const ExternalTexture* texture,
-            const ExternalImageDescriptorVkForTesting& descriptor,
-            std::vector<std::unique_ptr<ExternalSemaphore>> semaphores) override {
-            const ExternalTextureOpaqueFD* textureOpaqueFD =
-                static_cast<const ExternalTextureOpaqueFD*>(texture);
-            std::vector<int> waitFDs;
-            for (auto& semaphore : semaphores) {
-                waitFDs.push_back(
-                    static_cast<ExternalSemaphoreOpaqueFD*>(semaphore.get())->AcquireHandle());
-            }
+    // Extracts a file descriptor representing memory on a device
+    int GetMemoryFd(dawn::native::vulkan::Device* deviceVk, VkDeviceMemory memory) {
+        VkMemoryGetFdInfoKHR getFdInfo;
+        getFdInfo.sType = VK_STRUCTURE_TYPE_MEMORY_GET_FD_INFO_KHR;
+        getFdInfo.pNext = nullptr;
+        getFdInfo.memory = memory;
+        getFdInfo.handleType = VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT_KHR;
 
-            ExternalImageDescriptorOpaqueFD descriptorOpaqueFD;
-            *static_cast<ExternalImageDescriptorVk*>(&descriptorOpaqueFD) = descriptor;
-            descriptorOpaqueFD.memoryFD = textureOpaqueFD->Dup();
-            descriptorOpaqueFD.allocationSize = textureOpaqueFD->allocationSize;
-            descriptorOpaqueFD.memoryTypeIndex = textureOpaqueFD->memoryTypeIndex;
-            descriptorOpaqueFD.waitFDs = std::move(waitFDs);
+        int memoryFd = -1;
+        deviceVk->fn.GetMemoryFdKHR(deviceVk->GetVkDevice(), &getFdInfo, &memoryFd);
 
-            return dawn::native::vulkan::WrapVulkanImage(device.Get(), &descriptorOpaqueFD);
-        }
+        EXPECT_GE(memoryFd, 0) << "Failed to get file descriptor for external memory";
+        return memoryFd;
+    }
 
-        bool ExportImage(const wgpu::Texture& texture,
-                         VkImageLayout layout,
-                         ExternalImageExportInfoVkForTesting* exportInfo) override {
-            ExternalImageExportInfoOpaqueFD infoOpaqueFD;
-            bool success = ExportVulkanImage(texture.Get(), layout, &infoOpaqueFD);
-
-            *static_cast<ExternalImageExportInfoVk*>(exportInfo) = infoOpaqueFD;
-            for (int fd : infoOpaqueFD.semaphoreHandles) {
-                EXPECT_NE(fd, -1);
-                exportInfo->semaphores.push_back(std::make_unique<ExternalSemaphoreOpaqueFD>(fd));
-            }
-
-            return success;
-        }
-
-      private:
-        // Creates a VkImage with external memory
-        ::VkResult CreateImage(dawn::native::vulkan::Device* deviceVk,
+    // Prepares and exports memory for an image on a given device
+    void CreateBindExportImage(dawn::native::vulkan::Device* deviceVk,
                                uint32_t width,
                                uint32_t height,
                                VkFormat format,
-                               VkImage* image) {
-            VkExternalMemoryImageCreateInfoKHR externalInfo;
-            externalInfo.sType = VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO_KHR;
-            externalInfo.pNext = nullptr;
-            externalInfo.handleTypes = VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT_KHR;
+                               VkImage* handle,
+                               VkDeviceMemory* allocation,
+                               VkDeviceSize* allocationSize,
+                               uint32_t* memoryTypeIndex,
+                               int* memoryFd) {}
 
-            auto usage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT |
-                         VK_IMAGE_USAGE_TRANSFER_DST_BIT;
+    wgpu::Device mDevice;
+    dawn::native::vulkan::Device* mDeviceVk;
+};
 
-            VkImageCreateInfo createInfo;
-            createInfo.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
-            createInfo.pNext = &externalInfo;
-            createInfo.flags = VK_IMAGE_CREATE_ALIAS_BIT_KHR;
-            createInfo.imageType = VK_IMAGE_TYPE_2D;
-            createInfo.format = format;
-            createInfo.extent = {width, height, 1};
-            createInfo.mipLevels = 1;
-            createInfo.arrayLayers = 1;
-            createInfo.samples = VK_SAMPLE_COUNT_1_BIT;
-            createInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
-            createInfo.usage = usage;
-            createInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
-            createInfo.queueFamilyIndexCount = 0;
-            createInfo.pQueueFamilyIndices = nullptr;
-            createInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
-
-            return deviceVk->fn.CreateImage(deviceVk->GetVkDevice(), &createInfo, nullptr,
-                                            &**image);
-        }
-
-        // Allocates memory for an image
-        ::VkResult AllocateMemory(dawn::native::vulkan::Device* deviceVk,
-                                  VkImage handle,
-                                  VkDeviceMemory* allocation,
-                                  VkDeviceSize* allocationSize,
-                                  uint32_t* memoryTypeIndex) {
-            // Create the image memory and associate it with the container
-            VkMemoryRequirements requirements;
-            deviceVk->fn.GetImageMemoryRequirements(deviceVk->GetVkDevice(), handle, &requirements);
-
-            // Import memory from file descriptor
-            VkExportMemoryAllocateInfoKHR externalInfo;
-            externalInfo.sType = VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO_KHR;
-            externalInfo.pNext = nullptr;
-            externalInfo.handleTypes = VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT_KHR;
-
-            int bestType = deviceVk->GetResourceMemoryAllocator()->FindBestTypeIndex(
-                requirements, MemoryKind::Opaque);
-            VkMemoryAllocateInfo allocateInfo;
-            allocateInfo.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
-            allocateInfo.pNext = &externalInfo;
-            allocateInfo.allocationSize = requirements.size;
-            allocateInfo.memoryTypeIndex = static_cast<uint32_t>(bestType);
-
-            *allocationSize = allocateInfo.allocationSize;
-            *memoryTypeIndex = allocateInfo.memoryTypeIndex;
-
-            return deviceVk->fn.AllocateMemory(deviceVk->GetVkDevice(), &allocateInfo, nullptr,
-                                               &**allocation);
-        }
-
-        // Binds memory to an image
-        ::VkResult BindMemory(dawn::native::vulkan::Device* deviceVk,
-                              VkImage handle,
-                              VkDeviceMemory memory) {
-            return deviceVk->fn.BindImageMemory(deviceVk->GetVkDevice(), handle, memory, 0);
-        }
-
-        // Extracts a file descriptor representing memory on a device
-        int GetMemoryFd(dawn::native::vulkan::Device* deviceVk, VkDeviceMemory memory) {
-            VkMemoryGetFdInfoKHR getFdInfo;
-            getFdInfo.sType = VK_STRUCTURE_TYPE_MEMORY_GET_FD_INFO_KHR;
-            getFdInfo.pNext = nullptr;
-            getFdInfo.memory = memory;
-            getFdInfo.handleType = VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT_KHR;
-
-            int memoryFd = -1;
-            deviceVk->fn.GetMemoryFdKHR(deviceVk->GetVkDevice(), &getFdInfo, &memoryFd);
-
-            EXPECT_GE(memoryFd, 0) << "Failed to get file descriptor for external memory";
-            return memoryFd;
-        }
-
-        // Prepares and exports memory for an image on a given device
-        void CreateBindExportImage(dawn::native::vulkan::Device* deviceVk,
-                                   uint32_t width,
-                                   uint32_t height,
-                                   VkFormat format,
-                                   VkImage* handle,
-                                   VkDeviceMemory* allocation,
-                                   VkDeviceSize* allocationSize,
-                                   uint32_t* memoryTypeIndex,
-                                   int* memoryFd) {
-        }
-
-        wgpu::Device mDevice;
-        dawn::native::vulkan::Device* mDeviceVk;
-    };
-
-    // static
-    std::unique_ptr<VulkanImageWrappingTestBackend> VulkanImageWrappingTestBackend::Create(
-        const wgpu::Device& device) {
-        return std::make_unique<VulkanImageWrappingTestBackendOpaqueFD>(device);
-    }
+// static
+std::unique_ptr<VulkanImageWrappingTestBackend> VulkanImageWrappingTestBackend::Create(
+    const wgpu::Device& device) {
+    return std::make_unique<VulkanImageWrappingTestBackendOpaqueFD>(device);
+}
 
 }  // namespace dawn::native::vulkan